Dataset columns:

  query            string    (lengths 9 to 3.4k)
  document         string    (lengths 9 to 87.4k)
  metadata         dict
  negatives        sequence  (lengths 4 to 101)
  negative_scores  sequence  (lengths 4 to 101)
  document_score   string    (lengths 3 to 10)
  document_rank    string    (102 distinct values)
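Each row pairs a natural-language query with one positive document and a list of mined negative documents plus their scores. A minimal sketch of iterating over such rows, assuming the data sits in a local JSON Lines file (the filename, field access, and score threshold below are illustrative assumptions, not part of this dump):

import json

# Hypothetical local dump of rows like the ones shown below; path and format are assumed.
with open("code_retrieval_triplets.jsonl") as fobj:
    for line in fobj:
        row = json.loads(line)
        query = row["query"]              # natural-language description
        positive = row["document"]        # the matching code snippet
        negatives = row["negatives"]      # non-matching code snippets
        scores = [float(s) for s in row["negative_scores"]]
        assert len(negatives) == len(scores)
        # example use: keep only the harder negatives for triplet training
        hard_negatives = [n for n, s in zip(negatives, scores) if s > 0.6]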
get box sizes that are either 2**N or 3*2**N, within the limits set by the user
def _get_box_sizes(self, image_info, cat):
    file_id = 0
    impath = image_info['image_path'][file_id].strip()
    ext = image_info['image_ext'][file_id]
    wcs_data = fitsio.read_header(impath, ext=ext)
    wcs = eu.wcsutil.WCS(wcs_data)

    jacob = wcs.get_jacobian(100, 100)
    dudcol, dudrow, dvdcol, dvdrow = jacob

    det = dvdrow * dudcol - dvdcol * dudrow
    pixel_scale = np.sqrt(abs(det))
    print('found pixel scale:', pixel_scale)
    box_size = cat['box_size_arcsec'] / pixel_scale

    # clip to range
    box_size.clip(
        min=self['min_box_size'],
        max=self['max_box_size'],
        out=box_size,
    )
    box_size = box_size.astype('i4')

    w, = np.where((box_size % 2) != 0)
    if w.size > 0:
        box_size[w] += 1

    return box_size
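The positive document converts per-object sizes in arcseconds to pixels using the WCS pixel scale, clips them to configured limits, and forces even integers. A standalone sketch of just that clip-and-make-even step with plain NumPy, using made-up limits and an assumed pixel scale in place of the FITS header / WCS lookup:

import numpy as np

# Assumed values; in _get_box_sizes these come from the config and the image WCS.
min_box_size, max_box_size = 32, 256            # pixels
pixel_scale = 0.1                               # arcsec per pixel (assumed)
box_size_arcsec = np.array([2.23, 6.57, 41.0])  # per-object sizes from a catalog

box_size = box_size_arcsec / pixel_scale
box_size = box_size.clip(min=min_box_size, max=max_box_size)
box_size = box_size.astype('i4')

# force even box sizes, as the original code does
odd, = np.where((box_size % 2) != 0)
box_size[odd] += 1
print(box_size)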
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_compute_box_size(self):\n def compute_best_size_for(dim):\n size = ((self.element_space[dim]-1)//self.box_space[dim]) + 1\n size += 2 * self.ghost_space[dim]\n while size % Level.BOX_ALIGNMENTS[dim]:\n size += 1\n return size\n\n return Space([compute_best_size_for(dim) for dim in range(self.dimensions)])", "def _get_block_sizes(resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = ('Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, list(choices.keys())))\n raise ValueError(err)", "def _get_block_sizes(resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = (\n 'Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, choices.keys()))\n raise ValueError(err)", "def guess_box_size(xyz):\n return np.round(np.max(xyz[:, 1] - np.min(xyz[:, 1]), 0))", "def _acceptable_dimensions(self, box):\n return self._min_width < box.x1-box.x0 < self._max_width and\\\n self._min_height < box.y1-box.y0 < self._max_height", "def box_size(self) -> np.ndarray:\n return self.upper - self.lower + 1", "def box_size(self) -> np.ndarray:\n return self.upper - self.lower + 1", "def _get_block_sizes(self, resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n try:\n return choices[resnet_size]\n except KeyError:\n err = ('Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, choices.keys()))\n raise ValueError(err)", "def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes", "def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass", "def setBoxsize(length,width,height):\n return length,width,height", "def getLayoutDimensions(n, pref=\"height\"):\n nopt = np.sqrt(n)\n inoptw = int(nopt)\n inopth = int(nopt)\n while inoptw * inopth < n:\n if pref == \"width\":\n inoptw += 1\n if inoptw * inopth > (n - inopth):\n inoptw -= 1\n inopth += 1\n else:\n inopth += 1\n if inoptw * inopth > (n - inoptw):\n inopth -= 1\n inoptw += 1\n\n return (inopth, inoptw)", "def getBoxsize(stepCount,stepHeight,stepWidth,platformWidth,stairsLength,distance):\n #///重新给box的三个属性赋值\n box_width = (stepCount-1)*stepWidth + platformWidth\n box_length = (stairsLength*2+distance) \n #distance = box_length-stairsLength*2\n box_height = stepCount*2*stepHeight\n #print (\"box_length:%s,box_width:%s,box_height:%s\"%(box_length,box_width,box_height))\n return box_length,box_width,box_height", "def block_sizes(max_size):\n if max_size > 8:\n raise ValueError(\"Invalid max_size value specified!\")\n else:\n return [f\"{2**x}x{2**y}\" for x in range(2, max_size) for y in range(2, max_size) if x != 2 or y != 2]", "def box_sz(b):\n #taken from fastai\n return ((b[:, 2]-b[:, 0]) * (b[:, 3]-b[:, 1]))", 
"def get_bounding_box_size(images):\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width", "def get_bounding_box_size(images):\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width", "def dimensions():", "def get_term_dimensions():\n height, width = subprocess.check_output(SIZE).split()\n return int(width), int(height)", "def maxSize():\n rect = pf.app.desktop().availableGeometry()\n maxh,maxw = rect.width(),rect.height()\n return maxh,maxw", "def getDimensions():", "def _get_size_var(self):\n size_var = []\n for index in range(self._nr_args):\n restriction = self._domain_restricion[index]\n size_var.append(utils.get_nr_bits(restriction, self._precision))\n return size_var", "def getScaledDimensions(size, max_size, returnFactor=False):\n\n width, height = size\n max_width, max_height = max_size\n if (max_width, max_height) == (0, 0) or (width, height) == (0, 0): return (0, 0)\n wfactor, hfactor = 1.0, 1.0\n\n if width > max_width: wfactor = float(max_width) / width\n if height > max_height: hfactor = float(max_height) / height\n\n factor = min(wfactor, hfactor)\n\n size = (width * factor, height * factor)\n\n if not returnFactor:\n return size\n else:\n return size, factor", "def boxToExtent(box):\n b = normalizeBox(box)\n return (b[0], b[1], b[0]+b[2]-1, b[1]+b[3]-1)", "def get_size_of_grid(self):\n row = 0\n column = 0\n if int(self.var1.get()) == 1:\n row, column = 6, 6\n\n if int(self.var2.get()) == 1:\n row, column = 7, 6\n\n if int(self.var3.get()) == 1:\n row, column = 7, 7\n\n if int(self.var4.get()) == 1:\n row, column = 8, 8\n\n return row, column", "def scale_box(box, img_size):\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]", "def dimensions_of_box(box: ndarray) -> Tuple[float, float]:\n\n (top_left, _, bottom_right, _) = box\n\n (x1, y1) = top_left\n (x2, y2) = bottom_right\n\n return (x2 - x1, y2 - y1)", "def _filter_boxes2(boxes, max_size, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n if max_size > 0:\n keep = np.where(np.minimum(ws, hs) < max_size)[0]\n elif min_size > 0:\n keep = np.where(np.maximum(ws, hs) > min_size)[0]\n return keep", "def bbox_size(label_sitk):\n\n # Setting Bounding Box\n F_statistics = sitk.LabelShapeStatisticsImageFilter()\n\n F_statistics.Execute(label_sitk)\n bbox_dims = F_statistics.GetBoundingBox(1)\n return list(bbox_dims[3:6])", "def get_grid_dimensions(current_problem_size, params, grid_div, block_size_names):\n def get_dimension_divisor(divisor_list, default, params):\n if divisor_list is None:\n if default in params:\n divisor_list = [default]\n else:\n return 1\n return numpy.prod([int(eval(replace_param_occurrences(s, params))) for s in divisor_list])\n divisors = [get_dimension_divisor(d, block_size_names[i], params) for i, d in enumerate(grid_div)]\n return tuple(int(numpy.ceil(float(current_problem_size[i]) / float(d))) for i, d in enumerate(divisors))", "def getSize(self):\n size = [None,None]\n try:\n for i in self.itemType.find('parameters'):\n paramType = i.find('type').text.strip()\n if paramType.startswith('size-w'):\n size[0] = round(float(self.params[i.find('name').text]))\n if paramType.startswith('size-h'):\n size[1] = round(float(self.params[i.find('name').text]))\n except:\n pos = [16,16]\n return 
size", "def CalculatePaneSizerLimits(self, dock, pane):\r\n \r\n if pane.IsFixed():\r\n if dock.IsHorizontal():\r\n minPix = maxPix = pane.rect.x + 1 + pane.rect.width\r\n else:\r\n minPix = maxPix = pane.rect.y + 1 + pane.rect.height\r\n\r\n return minPix, maxPix\r\n \r\n totalPixsize, totalProportion = self.GetTotalPixSizeAndProportion(dock)\r\n partnerPane = self.GetPartnerPane(dock, pane)\r\n\r\n if dock.IsHorizontal():\r\n \r\n minPix = pane.rect.x + 1\r\n maxPix = pane.rect.x + 1 + pane.rect.width\r\n\r\n if pane.min_size.IsFullySpecified():\r\n minPix += pane.min_size.x\r\n else:\r\n minPix += 1\r\n\r\n if partnerPane:\r\n maxPix += partnerPane.rect.width\r\n\r\n if partnerPane.min_size.IsFullySpecified():\r\n maxPix -= partnerPane.min_size.x - 1\r\n \r\n else:\r\n minPix = maxPix\r\n \r\n else:\r\n \r\n minPix = pane.rect.y + 1\r\n maxPix = pane.rect.y + 1 + pane.rect.height\r\n\r\n if pane.min_size.IsFullySpecified():\r\n minPix += pane.min_size.y\r\n else:\r\n minPix += 1\r\n\r\n if partnerPane: \r\n maxPix += partnerPane.rect.height\r\n\r\n if partnerPane.min_size.IsFullySpecified():\r\n maxPix -= partnerPane.min_size.y - 1\r\n \r\n else: \r\n minPix = maxPix\r\n \r\n return minPix, maxPix", "def image_size(size):\n l_max = max(size)\n if l_max > 300:\n num = l_max/300\n else:\n num = 1\n w = round(size[0] / num)\n h = round(size[1] / num)\n new_size = [w, h]\n return new_size", "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def getLimit(self) :\n\t\treturn (self.modelSizeX, self.modelSizeY)", "def len_square(bound):\n\treturn (8 - 2 * bound)", "def getwinsize(self):", "def getDims(f):\n size = len(f) * .001\n if (size <= 10):\n width = 32\n elif (size <= 30):\n width = 64\n elif (size <= 60):\n width = 128\n elif (size <= 100):\n width = 256\n elif (size <= 200):\n width = 384\n elif (size <= 500):\n width = 512\n elif (size <= 1000):\n width = 768\n else:\n width = 1024\n return (width, math.ceil(size*1000 // width)+ 1)", "def scalebox(self, b):\n return [int(b[0]*self.video_w/self.detection_image_size[0]),\n int(b[1]*self.video_h/self.detection_image_size[1]),\n int(b[2]*self.video_w/self.detection_image_size[0]),\n int(b[3]*self.video_h/self.detection_image_size[1])]", "def boundaries_size(*args):\n return _ida_hexrays.boundaries_size(*args)", "def _query_min_max_size(self):\n\n # Collect contributions of child widgets\n mima1 = [0, 1e9, 0, 0]\n for child in self.children:\n mima2 = child._size_limits\n mima1[0] = max(mima1[0], mima2[0])\n mima1[1] = min(mima1[1], mima2[1])\n mima1[2] += mima2[2]\n mima1[3] += mima2[3]\n\n # Dont forget padding and spacing\n extra_padding = 2\n extra_spacing = 2\n for i in range(4):\n mima1[i] += extra_padding\n mima1[2] += extra_spacing\n mima1[3] += extra_spacing\n\n # Own limits\n mima3 = super()._query_min_max_size()\n\n # Combine own limits with limits of children\n return [max(mima1[0], mima3[0]),\n min(mima1[1], mima3[1]),\n max(mima1[2], mima3[2]),\n min(mima1[3], mima3[3])]", "def _xywh2min_max(box):\n x, y, w, h = box\n return np.array([x, y, x+w, y+h])", "def optimal_chunksizes(nt, nlat, nlon):\n\n clon = np.sqrt(1000000.0 * nlon / (nlat * nt))\n clat = nlat * clon / nlon\n return (nt, int(np.ceil(clat)), 
int(np.ceil(clon)))", "def box_area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def box_area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def _calculate_default_size_config():\n matrix_size = range(2048, 38912, 2048)\n leading_dim = range(2112, 38976, 2048)\n return [(size, lead) for size, lead in zip(matrix_size, leading_dim)]", "def width_of_box(box: ndarray) -> float:\n\n (w, _) = dimensions_of_box(box)\n return w", "def check_size(self,x,y):\n assert(x <= 10**3), 'Width larger than 1000' \n assert(y <= 10**3), 'Height larger than 1000' \n assert(x*y <= 3*(10**5)), 'Resolution larger than 300000'", "def boxes_minmax_to_whctrs(boxes, in_place = False):\n if not in_place:\n boxes = np.copy(boxes)\n # Calculate the widths:\n boxes[:,2] = boxes[:2] - boxes[:,0]\n boxes[:,3] = boxes[:3] - boxes[:,1]\n\n # Move the min to the center:\n boxes[:,0] += 0.5*boxes[:,2]\n boxes[:,1] += 0.5*boxes[:,3]\n\n return boxes", "def boundingBoxArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)", "def _get_new_sizes(value, value_range, size_range):\n slope = (size_range[1] - size_range[0]) / float(value_range[1]- value_range[0])\n return size_range[0] + slope * (value - value_range[0])", "def message_box_size_limit(self) -> ConfigNodePropertyInteger:\n return self._message_box_size_limit", "def render_limits(\n origin: tuple[float, float],\n size_in_inches: tuple[float, float],\n scale: float,\n) -> tuple[float, float, float, float]:\n min_x, min_y = origin\n max_x = min_x + size_in_inches[0] * scale\n max_y = min_y + size_in_inches[1] * scale\n return min_x, min_y, max_x, max_y", "def _make_size_dict(evo_config):\n\n # get default variables from config\n min_size = evo_config['individuals']['min_box_size']\n max_size = evo_config['individuals']['max_box_size']\n is_random = evo_config['individuals']['random_box_size']\n symmetric = evo_config['individuals']['symmetric']\n\n # make random sizes for each box\n size_dict = {key: None for key in ['left_hand', 'right_hand', 'left_foot', 'right_foot', 'chest', 'hip']}\n\n for size_key in size_dict.keys():\n\n # if symmetric, we can skip the computation for the other limb\n if symmetric and 'right_hand' == size_key and size_dict['left_hand'] is not None:\n size_dict[size_key] = size_dict['left_hand']\n continue\n elif symmetric and 'left_hand' == size_key and size_dict['right_hand'] is not None:\n size_dict[size_key] = size_dict['right_hand']\n continue\n elif symmetric and 'left_foot' == size_key and size_dict['right_foot'] is not None:\n size_dict[size_key] = size_dict['right_foot']\n continue\n elif symmetric and 'right_foot' == size_key and size_dict['left_foot'] is not None:\n size_dict[size_key] = size_dict['left_foot']\n continue\n\n # make 3 values according to the selected policy\n if is_random:\n limb_size = np.random.rand(3) * (max_size - min_size) + min_size\n else:\n limb_size = np.asarray([(max_size + min_size) / 2] * 3)\n\n size_dict[size_key] = limb_size\n\n return size_dict", "def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)", "def get_size():\n opt = ['regular', 'small', 'large']\n inp = option_menu(opt, 'Widget size:')\n OPTIONS['size'] = opt[inp]\n add_to_collected('size', opt[inp])\n return", "def ssd_size_bounds_to_values(size_bounds,\n n_feat_layers,\n 
img_shape=(300, 300)):\n assert img_shape[0] == img_shape[1]\n\n img_size = img_shape[0]\n min_ratio = int(size_bounds[0] * 100)\n max_ratio = int(size_bounds[1] * 100)\n step = int(math.floor((max_ratio - min_ratio) / (n_feat_layers - 2)))\n # Start with the following smallest sizes.\n sizes = [[img_size * size_bounds[0] / 2, img_size * size_bounds[0]]]\n for ratio in range(min_ratio, max_ratio + 1, step):\n sizes.append((img_size * ratio / 100.,\n img_size * (ratio + step) / 100.))\n return sizes", "def getWidthHeight(size: str) -> typing.Tuple[int, int]:\n return {\n CommonPrefs.SZ_640x480: (640, 480),\n CommonPrefs.SZ_800x600: (800, 600),\n CommonPrefs.SZ_1024x768: (1024, 768),\n CommonPrefs.SZ_1366x768: (1366, 768),\n CommonPrefs.SZ_1920x1080: (1920, 1080),\n CommonPrefs.SZ_FULLSCREEN: (-1, -1),\n }.get(size, (1024, 768))", "def size_in(self):\n return self.dimensions", "def box_scale(k, m, s_min=0.1, s_max=0.9):\n\n # equation 4 from paper\n return s_min + (s_max - s_min) * (k - 1) / (m - 1)", "def GetNiceExtentsByDivisions(minval,maxval,divisions,tolerance):\n pass", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corner_idx)\n corners.append(tile.opp_corner_idx)\n corners = np.stack(corners)\n\n # Find extremes\n min_yx = corners.min(axis=0)\n max_yx = corners.max(axis=0)\n\n size = max_yx - min_yx\n centre = -min_yx\n return tuple(size), centre", "def _get_attention_sizes(resnet_size):\n choices = {\n 18: [False, [False, True], [True, False], False],\n 34: [False, [False, True, True, False], False, False],\n 50: [False, [False, False, False, False], True, False],\n 101: [False, False, True, True],\n 152: [False, False, True, True],\n 200: [False, False, True, True]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = (\n 'Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, choices.keys()))\n raise ValueError(err)", "def get_bounds(shape, affine):\n adim, bdim, cdim = shape\n adim -= 1\n bdim -= 1\n cdim -= 1\n # form a collection of vectors for each 8 corners of the box\n box = np.array([[0., 0, 0, 1],\n [adim, 0, 0, 1],\n [0, bdim, 0, 1],\n [0, 0, cdim, 1],\n [adim, bdim, 0, 1],\n [adim, 0, cdim, 1],\n [0, bdim, cdim, 1],\n [adim, bdim, cdim, 1]]).T\n box = np.dot(affine, box)[:3]\n return zip(box.min(axis=-1), box.max(axis=-1))", "def boxes_whctrs_to_minmax(boxes, in_place = False):\n\n if not in_place:\n boxes = np.copy(boxes)\n\n # Move the centers to be the minima:\n boxes[:,0] -= 0.5*boxes[:,2]\n boxes[:,1] -= 0.5*boxes[:,3]\n\n # Add the width to the start to get the max:\n boxes[:,2] += boxes[:,0] \n boxes[:,3] += boxes[:,1] \n\n return boxes", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 10)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def tile_size_2d(self):\n return 32.0, 32.0", "def get_width_and_height_from_size(x):\n if isinstance(x, int):\n return x, x\n if isinstance(x, list) or isinstance(x, tuple):\n return x\n else:\n raise TypeError()", "def _get_size(self, bbox: BBox) -> tuple[int, int]:\n if self.size is not None:\n return self.size\n\n if self.resolution is not None:\n return bbox_to_dimensions(bbox, self.resolution)\n\n raise ValueError(\"Size or resolution for the requests should be provided!\")", "def _get_center_coordinates_and_sizes_vector(box_data):\n ymin, xmin, ymax, xmax = [np.squeeze(i) for i in 
np.split(box_data, 4, 0)]\n width = np.subtract(xmax, xmin)\n height = np.subtract(ymax, ymin)\n ycenter = np.add(ymin, np.multiply(height, 0.5))\n xcenter = np.add(xmin, np.multiply(width, 0.5))\n return ycenter, xcenter, height, width", "def get_sizes(tokens):\n\n f, ax = plt.subplots()\n r = find_renderer(f)\n boxes = []\n for tok in tokens + [\" \".join(tokens)]:\n box = measure_text(tok, r, ax)\n boxes.append(box)\n bboxes = boxes[:-1]\n line_height = boxes[-1][1]\n avg_space_width = (boxes[-1][0] - sum([w for w, _ in bboxes])) / (\n len(tokens) - 1)\n space_widths = []\n for fst, fstbox, snd, sndbox in zip(tokens[:-1], bboxes[:-1], tokens[1:],\n bboxes[1:]):\n pair = fst + \" \" + snd\n box = measure_text(pair, r, ax)\n fstbox = measure_text(fst, r, ax)\n sndbox = measure_text(snd, r, ax)\n space_width = box[0] - fstbox[0] - sndbox[0]\n space_widths.append(space_width)\n avg_pairwise_space_width = sum(space_widths) / len(space_widths)\n ratio = avg_space_width / avg_pairwise_space_width\n space_widths = [w * ratio for w in space_widths]\n # add a dummy final space\n space_widths.append(0.0)\n return bboxes, line_height, space_widths", "def find_size(mod):\n left = right = top = bottom = 0\n\n for line in (n for n in mod if n[0] == \"fp_line\"):\n layer = [n for n in line if n[0] == \"layer\"][0]\n if layer[1] in (\"F.CrtYd\", \"B.CrtYd\"):\n start = [n for n in line if n[0] == \"start\"][0]\n end = [n for n in line if n[0] == \"end\"][0]\n for x, y in (start[1:], end[1:]):\n x = float(x)\n y = float(y)\n left = min(x, left)\n right = max(x, right)\n top = min(y, top)\n bottom = max(y, bottom)\n\n width = right - left\n height = bottom - top\n\n left -= width * border_ratio\n right += width * border_ratio\n top -= height * border_ratio\n bottom += height * border_ratio\n\n return left, right, top, bottom", "def isValidTeamSize(size, minimum, maximum) :\n\n return isInteger(size) and int(size) >= minimum and int(size) <= maximum", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]", "def layers_sizes(self):\n return iter([self.delta_h*l for l in range(int(self.h/self.delta_h)-1)])", "def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)", "def _calc_figure_size(self):\n\n figheight_default = 6.125\n minsize = 0.025 * figheight_default # minimum size of an axis\n\n axheight = min(minsize, self.maxheight * figheight_default)\n\n w = 8. # inches\n # colorbar, gap, title + individual axes\n h = self.cb_height * figheight_default * 3. 
+ self.get_n() * axheight\n\n return (w, h)", "def _get_split_sizes(self, n_examples):\n\n min_ex = (int(n_examples // self.n_splits)\n * np.ones(self.n_splits, dtype=np.int8))\n \n rem = np.array(\n [1 if i < n_examples % self.n_splits else 0\n for i in range(self.n_splits)],\n dtype=np.int8)\n\n return np.add(min_ex, rem)", "def get_size_inches(self):\n width, height = self.figure.get_size_inches()\n bbox = self.get_position()\n width = width * abs(bbox.width)\n height = height * abs(bbox.height)\n return width, height", "def dual_size(k_max: int):\n n = 2 * k_max + 1\n return n", "def get_sizes(petsc_dofs_range, n_dof, n_components):\n drange = tuple(n_components * nm.asarray(petsc_dofs_range))\n n_loc = drange[1] - drange[0]\n n_all_dof = n_dof * n_components\n sizes = (n_loc, n_all_dof)\n\n return sizes, drange", "def get_image_sizes():\n widths = []\n heights = []\n\n from settings import folders_location\n for individual_folder_name in listdir(folders_location):\n individual_training_folder_path = folders_location + individual_folder_name + \"/training/\"\n\n image_paths = listdir(individual_training_folder_path)\n for image_path in image_paths:\n img = cv2.imread(individual_training_folder_path + image_path)\n\n height, width, channel = img.shape\n widths.append(width)\n heights.append(height)\n\n print(individual_training_folder_path + image_path)\n\n print(\"Min: %s, Max: %s\" % (np.min(widths), np.max(widths)))\n print(\"Average: %s\" % (np.average(widths)))\n\n return widths", "def box_area(box):\n x1, y1, x2, y2 = box\n w = x2 - x1\n h = y2 - y1\n return float(w) * h", "def normalize_size(getsizes, minscale: Union[int, float] = 0.5, maxscale: Union[int, float] = 4, scaler: str = 'zscore'):\n # Instead of Min-Max scaling, that shrinks any distribution in the [0, 1] interval, scaling the variables to\n # Z-scores is better. Min-Max Scaling is too sensitive to outlier observations and generates unseen problems,\n\n # Set sizes to 0 if not available\n getsizes[np.isinf(getsizes)]=0\n getsizes[np.isnan(getsizes)]=0\n\n # out-of-scale datapoints.\n if scaler == 'zscore' and len(np.unique(getsizes)) > 3:\n getsizes = (getsizes.flatten() - np.mean(getsizes)) / np.std(getsizes)\n getsizes = getsizes + (minscale - np.min(getsizes))\n elif scaler == 'minmax':\n try:\n from sklearn.preprocessing import MinMaxScaler\n except:\n raise Exception('sklearn needs to be pip installed first. 
Try: pip install scikit-learn')\n # scaling\n getsizes = MinMaxScaler(feature_range=(minscale, maxscale)).fit_transform(getsizes).flatten()\n else:\n getsizes = getsizes.ravel()\n # Max digits is 4\n getsizes = np.array(list(map(lambda x: round(x, 4), getsizes)))\n\n return getsizes", "def _grid_hint_size(self) -> int:", "def test_get_grid_size(flopy_dis_mf6):\n flopy_dis, mf6 = flopy_dis_mf6\n mf6.initialize()\n\n prescribed_grid_size = flopy_dis.nrow * flopy_dis.ncol\n\n # Getting the grid id from the model, requires specifying one variable\n k11_tag = mf6.get_var_address(\"K11\", flopy_dis.model_name, \"NPF\")\n grid_id = mf6.get_var_grid(k11_tag)\n\n assert prescribed_grid_size == mf6.get_grid_size(grid_id)", "def get_engine_size():\n possibilities = [d for d in GraphParameters.double_range(1.2, 4.2, 0.2)]\n return choice(possibilities)", "def _check_image_size(self, size):\n if size % 32 == 0:\n return (0, 0)\n else:\n imageBorder = 32 - (size % 32)\n if (imageBorder % 2) == 0:\n return (int(imageBorder / 2), int(imageBorder / 2))\n else:\n return (int(imageBorder / 2), int((imageBorder / 2) + 1))", "def roi_y_size():\n def r(x):\n return x & 0xFFF\n\n def w(x):\n return min(x, 0xFFF)\n return r, w", "def search_space_size(self):", "def get_dim():\n return (Settings.width, Settings.height)", "def bounding_box(self):\n box_min = []\n box_max = []\n if self.is_empty():\n raise ValueError('empty polytope is not allowed')\n for i in range(0, self.space_dimension()):\n x = Variable(i)\n coords = [ v.coefficient(x) for v in self.generators() ]\n max_coord = max(coords)\n min_coord = min(coords)\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))", "def _generate_dimensions(conf_dict, message):\n sq_side = math.ceil(math.sqrt(_minimal_pixel_count(conf_dict, message)))\n return (sq_side, sq_side)", "def get_subplot_sizes(num):\n if num <= 0:\n return 1, 1\n if num <= 3:\n return 1, num\n if num <= 8:\n return 2, int(ceil(num / 2.))\n if num <= 15:\n return 3, int(ceil(num / 3.))\n if num <= 24:\n return 4, int(ceil(num / 4.))\n raise ValueError(\"cannot place %d subplots\" % num)", "def get_final_bounding_box(boxes, nms_idx, width: int, height: int):\n x1 = np.inf\n y1 = np.inf\n x2 = -np.inf\n y2 = -np.inf\n\n bx = [boxes[i] for i in nms_idx]\n for box in bx:\n xmin = np.min(box[[0, 2]])\n xmax = np.max(box[[0, 2]])\n ymin = np.min(box[[1, 3]])\n ymax = np.max(box[[1, 3]])\n\n x1 = np.min([xmin, x1])\n y1 = np.min([ymin, y1])\n x2 = np.max([xmax, x2])\n y2 = np.max([ymax, y2])\n return x1, y1, x2, y2", "def get_all_sizes(self) -> List[int]:\n sizes = [key.get_size_of_complex() for key in self._complexes.keys()]\n return sizes", "def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)", "def avail_sizes(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_sizes function must be called with \"\n \"-f or --function, or with the --list-sizes option\"\n )\n\n sizes = {\n \"Micro Instance\": {\"id\": \"1\", \"ram\": 1024, \"disk\": 50, \"cores\": 1},\n \"Small Instance\": {\"id\": \"2\", \"ram\": 2048, \"disk\": 50, \"cores\": 1},\n \"Medium Instance\": {\"id\": \"3\", \"ram\": 4096, \"disk\": 50, \"cores\": 2},\n \"Large Instance\": {\"id\": \"4\", \"ram\": 7168, \"disk\": 50, 
\"cores\": 4},\n \"Extra Large Instance\": {\"id\": \"5\", \"ram\": 14336, \"disk\": 50, \"cores\": 8},\n \"Memory Intensive Instance Medium\": {\n \"id\": \"6\",\n \"ram\": 28672,\n \"disk\": 50,\n \"cores\": 4,\n },\n \"Memory Intensive Instance Large\": {\n \"id\": \"7\",\n \"ram\": 57344,\n \"disk\": 50,\n \"cores\": 8,\n },\n }\n\n return sizes", "def optimal_grid(num):\n\n # get maximum shape\n shape = int(np.ceil(np.sqrt(num)))\n # get number of rows and columns based on maximum shape\n if shape ** 2 == num:\n nrows = shape\n ncols = shape\n else:\n nrows = int(np.ceil(num / shape))\n ncols = int(np.ceil(num / nrows))\n # get position of figures\n pos = []\n for i in range(nrows):\n for j in range(ncols):\n pos.append([i, j])\n # return nrows, ncols and positions\n return nrows, ncols, pos", "def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2])) # w, h" ]
[ "0.6898253", "0.67774564", "0.6747777", "0.6713784", "0.6704732", "0.66019756", "0.66019756", "0.65982324", "0.6596363", "0.6499011", "0.64778125", "0.6461232", "0.643119", "0.6366755", "0.6308479", "0.6291537", "0.6291537", "0.6284666", "0.6251369", "0.6221639", "0.6197825", "0.6192271", "0.61398184", "0.6121089", "0.6085505", "0.6049268", "0.6043943", "0.6039048", "0.60380536", "0.6026183", "0.6005126", "0.59791684", "0.59691304", "0.5951909", "0.59332615", "0.59308076", "0.5926832", "0.5919082", "0.590259", "0.59017783", "0.5893275", "0.5893257", "0.588383", "0.5875387", "0.5875387", "0.5842805", "0.5839371", "0.5839131", "0.58283985", "0.5819935", "0.5805234", "0.58002764", "0.57873833", "0.5783283", "0.57793695", "0.5762267", "0.57569975", "0.57560253", "0.5755918", "0.57519674", "0.5744013", "0.5735341", "0.5730038", "0.57276034", "0.57270366", "0.5724694", "0.5723262", "0.57194394", "0.5712667", "0.57075953", "0.56908256", "0.5690684", "0.569023", "0.56893337", "0.5685742", "0.5682553", "0.56816274", "0.5678942", "0.56783956", "0.56717414", "0.56687886", "0.5668156", "0.56612396", "0.5654219", "0.56514984", "0.56401855", "0.5639933", "0.5627136", "0.56266207", "0.5622665", "0.56191313", "0.5618303", "0.5613745", "0.56133366", "0.56084263", "0.5603859", "0.56010747", "0.56005585", "0.5599068", "0.5590526" ]
0.7289415
0
won't load any data yet because the files are gzipped and just reading the header takes 2.6 G and a long time! This means we need to set magzp and scale later when we read
def _make_image_info_hst(self, flistname):
    flist = []
    magzp_list = []
    with open(flistname) as fobj:
        for line in fobj:
            ls = line.split()
            fname = ls[0]
            magzp = float(ls[1])
            # fname = line.strip()
            flist.append(fname)
            magzp_list.append(magzp)

    magzp = np.array(magzp_list)

    nimage = len(flist)
    path_len = max([len(f) for f in flist])

    try:
        ext_len = len(self['image_ext'])
    except:
        ext_len = None

    # image_info = meds.util.get_image_info_struct(
    image_info = get_image_info_struct(
        nimage,
        path_len,
        ext_len=ext_len,
    )
    image_info['position_offset'] = 1
    image_info['image_ext'] = self['image_ext']
    image_info['weight_ext'] = self['weight_ext']

    for i, f in enumerate(flist):
        image_info['image_id'][i] = i
        image_info['image_path'][i] = f
        image_info['weight_path'][i] = f.replace('sci.fits', 'wht.fits')

    image_info['magzp'] = magzp
    image_info['scale'] = self._get_scale_from_magzp(magzp)

    return image_info
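The scale column is filled from the magnitude zero point via self._get_scale_from_magzp, whose body is not shown in this row. The usual photometric convention is a multiplicative flux scale relative to a reference zero point; a sketch under that assumption (the reference value 30.0 is illustrative, not taken from the source):

import numpy as np

def get_scale_from_magzp(magzp, magzp_ref=30.0):
    # Assumed convention: rescale fluxes so every image shares magzp_ref.
    # flux_ref = flux * 10**(0.4 * (magzp_ref - magzp))
    return 10.0 ** (0.4 * (magzp_ref - np.asarray(magzp)))

magzp = np.array([25.94, 26.23])
print(get_scale_from_magzp(magzp))  # one multiplicative scale per image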
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def parseDec_GZIPLoader(self, data): # Once gziploader is decrypted, parse the 0xA9 byte config\n hardcoded_val = data[0:1]\n flag = data[1:2]\n datfile_size = int.from_bytes(data[2:6], byteorder=\"little\")\n print(\"[+] datfile size: \", datfile_size)\n dllfile_size = int.from_bytes(data[6:10], byteorder=\"little\")\n print(\"[+] dllfile size: \", dllfile_size)\n dirname = self.extractStrFromBuff(data[10:])\n print(\"[+] Directory Name:\", dirname)\n\n count = self.incNulls(data[10 + len(dirname):])\n datname = self.extractStrFromBuff(data[count + 10 + len(dirname):])\n print(\"[+] Dat Name :\", datname)\n\n count = count + 10 + len(dirname) + len(datname)\n datname = datname[1:]\n count2 = self.incNulls(data[count:])\n count = count + count2\n dllname = self.extractStrFromBuff(data[count:])\n print(\"[+] Dll Name: \", dllname)\n count += len(dllname)\n count2 = self.incNulls(data[count:])\n count += count2\n # datfile offset is now 710 bytes in\n datfile_data = data[710:710 + datfile_size]\n dllfile_start = 710 + datfile_size\n dllfile_data = data[dllfile_start:dllfile_start + dllfile_size]\n datfile_b64 = base64.b64encode(datfile_data).decode()\n dllfile_b64 = base64.b64encode(dllfile_data).decode()\n ParsedDict = {\"Directory_Name\": dirname.decode(),\n \"DatFile_Name\": datname.decode(),\n \"DllFile_Name\": dllname.decode(),\n \"DatFile\": datfile_b64,\n \"DllFile\": dllfile_b64}\n\n return ParsedDict", "def load_file(path):\n with open(path, \"rb\") as f: # bsps are binary files\n byte_list = f.read() # stores all bytes in bytes1 variable (named like that to not interfere with builtin names\n header = load_header(byte_list)\n skin_names = [byte_list[header.ofs_skins + 64 * x:header.ofs_skins + 64 * x + 64].decode(\"ascii\", \"ignore\") for x in range(header.num_skins)]\n triangles = load_triangles(byte_list[header.ofs_tris:header.ofs_frames], header)\n frames = load_frames(byte_list[header.ofs_frames:header.ofs_glcmds], header)\n texture_coordinates = load_texture_coordinates(byte_list[header.ofs_st:header.ofs_tris], header)\n gl_commands = load_gl_commands(byte_list[header.ofs_glcmds:header.ofs_end])\n # print(header)\n # print(skin_names)\n # print(triangles)\n # print(frames)\n # print(texture_coordinates)\n for i in range(len(texture_coordinates)):\n texture_coordinates[i].s = texture_coordinates[i].s/header.skinwidth\n texture_coordinates[i].t = texture_coordinates[i].t / header.skinheight\n # print(texture_coordinates)\n # print(header.num_xyz)\n for i_frame in range(len(frames)):\n for i_vert in range((header.num_xyz)):\n frames[i_frame].verts[i_vert].v[0] = frames[i_frame].verts[i_vert].v[0]*frames[i_frame].scale.x+frames[i_frame].translate.x\n frames[i_frame].verts[i_vert].v[1] = frames[i_frame].verts[i_vert].v[1] * frames[i_frame].scale.y + frames[i_frame].translate.y\n frames[i_frame].verts[i_vert].v[2] = frames[i_frame].verts[i_vert].v[2] * frames[i_frame].scale.z + frames[i_frame].translate.z\n model = md2_object(header, skin_names, triangles, frames, texture_coordinates, gl_commands)\n return model", "def 
lazy_read_file(self):\n store = zarr.DirectoryStore(self.fpath)\n z_array = zarr.open(store=store, mode='r')\n self.da_input = da.from_array(z_array)\n self.data = self.da_input\n self.data_dim = self.data.shape\n self.chunk_size = z_array.chunks", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def process_raw_data(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import pandas as pd\n from datetime import datetime\n import os,gzip,csv,sys\n from numpy import pi\n import numpy as np\n radians = False\n\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename, 'r')\n else:\n f = open(xDir + \"/\" + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process\n # if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n # xdat = str(xFilename[len(xCar)+1:len(xCar) + 9])\n\n # fnOut = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_dat.csv\" #set CSV output for raw data\n # fnLog = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_log.csv\" #output for 
logfile\n\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(1).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]), int(dtime[14:16]),\n int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n # fOut = open(fnOut, 'w')\n # fOut.write(sOutHeader)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # epoch = dateob.strftime('%s.%f')\n # dtime = int(dateob.strftime('%Y%m%d%H%M%S'))\n\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #seconds = fdate.strftime('%s.%f')\n seconds = dt_to_epoch(fdate)\n def getNS(seconds):\n ns = str(float(seconds) * 1e-3)[11:]\n #str(pd.to_numeric(str(float(seconds) * 1e-3)[11:]) * 100000)[:9]\n return (str(ns).ljust(15, '0'))[:9]\n\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n str(float(seconds)*1e-3)[:10]) + ',' + getNS(seconds) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(',') + str(\n lstS[14])\n\n if float(seconds) >= (float(firsttime) + (60 * float(initialTimeBack))):\n fOut.write(csvWrite)\n del (seconds)\n del (csvWrite)\n xCntObs += 1\n # sOut = str(gZIP) + \",\" + str(f) + \",\" + str(xCntObs) + \",\" + str(xCntGoodValues) + \"\\n\"\n # fLog.write(sOut)\n infOut.write(str(xFilename) + '\\n')\n fOut.close()\n fLog.close()\n infOut.close()\n\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df['shift_R'] = wind_df.R.shift(periods=int(float(shift)))\n wind_df['raw_R'] = wind_df.apply(lambda row: row['R'], axis=1)\n wind_df_not_null = wind_df.loc[wind_df['LAT'].notnull(),].reset_index(drop=True)\n del (wind_df)\n wind_df = wind_df_not_null.copy()\n\n radians = False\n wind_df['QUADRANT'] = 
wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY_calc'] = wind_df.apply(lambda row:calc_velocity(row['timediff'],row['distance']),axis=1)\n wind_df = wind_df.drop(columns = ['VELOCITY'])\n wind_df = wind_df.rename(columns = {'VELOCITY_calc':'VELOCITY'})\n\n wind_df['VELOCITY'] = wind_df.apply(lambda x: (str(x.VELOCITY)), axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: 0 if x.VELOCITY == 'XX.X' else x.VELOCITY, axis=1)\n wind_df['fVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[0]\n wind_df = wind_df.loc[wind_df['fVel'].notnull(),:].reset_index(drop=True)\n wind_df = wind_df.loc[wind_df['fVel'] != 'nan',:].reset_index(drop=True)\n wind_df['firstVel'] = wind_df.apply(lambda x: int(x['fVel']), axis=1)\n\n wind_df['sVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[1]\n wind_df = wind_df.loc[wind_df['sVel'].notnull(),].reset_index(drop=True)\n wind_df['secVel'] = wind_df.apply(lambda x: int(x['sVel']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstVel) + '.' + str(x.secVel)), axis=1)\n wind_df2 = wind_df.drop(columns=['VELOCITY', 'secVel', 'sVel', 'fVel', 'firstVel'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'VELOCITY'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n ## CORRECT W WIND THING\n wind_df['W'] = wind_df.apply(lambda x: (str(x.W)), axis=1)\n wind_df['W'] = wind_df.apply(lambda x: 0 if x.W == 'XX.X' else x.W, axis=1)\n wind_df['W'] = wind_df.apply(lambda x: '0.0' if x.W == '0' else x.W, axis = 1)\n wind_df['fW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[0]\n # wind_df = wind_df.loc[wind_df['fW'].notnull(),].reset_index(drop=True)\n wind_df['firstW'] = wind_df.apply(lambda x: int(x['fW']), axis=1)\n wind_df['sW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[1]\n # wind_df = wind_df.loc[wind_df['sW'].notnull(),].reset_index(drop=True)\n wind_df['secW'] = wind_df.apply(lambda x: int(x['sW']), axis=1)\n wind_df['wloc'] = wind_df.apply(lambda x: float(str(x.firstW) + '.' 
+ str(x.secW)), axis=1)\n wind_df2 = wind_df.drop(columns=['W', 'secW', 'sW', 'fW', 'firstW'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'wloc': 'W'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT U WIND THING\n wind_df['U'] = wind_df.apply(lambda x: (str(x.U)), axis=1)\n wind_df['U'] = wind_df.apply(lambda x: 0 if x.U == 'XX.X' else x.U, axis=1)\n wind_df['U'] = wind_df.apply(lambda x: '0.0' if x.U == '0' else x.U, axis = 1)\n\n wind_df['fU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstU'] = wind_df.apply(lambda x: int(x['fU']), axis=1)\n wind_df['sU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secU'] = wind_df.apply(lambda x: int(x['sU']), axis=1)\n wind_df['uloc'] = wind_df.apply(lambda x: float(str(x.firstU) + '.' + str(x.secU)), axis=1)\n wind_df2 = wind_df.drop(columns=['U', 'secU', 'sU', 'fU', 'firstU'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'uloc': 'U'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT V WIND THING\n wind_df['V'] = wind_df.apply(lambda x: (str(x.V)), axis=1)\n wind_df['V'] = wind_df.apply(lambda x: 0 if x.V == 'XX.X' else x.V, axis=1)\n wind_df['V'] = wind_df.apply(lambda x: '0.0' if x.V == '0' else x.V, axis = 1)\n\n wind_df['fV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstV'] = wind_df.apply(lambda x: int(x['fV']), axis=1)\n wind_df['sV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secV'] = wind_df.apply(lambda x: int(x['sV']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstV) + '.' + str(x.secV)), axis=1)\n wind_df2 = wind_df.drop(columns=['V', 'secV', 'sV', 'fV', 'firstV'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'V'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n\n wind_df['adj_v'] = wind_df.apply(lambda row: -row['horz_length'] * np.cos(row['adj_theta']), axis=1)\n wind_df['adj_u'] = wind_df.apply(lambda row: row['horz_length'] * np.sin(row['adj_theta']), axis=1)\n\n ## GO THROUGH WIND\n window_size = 30\n u_series = pd.Series(wind_df['adj_u'])\n u_windows = u_series.rolling(window_size)\n u_averages = pd.DataFrame(u_windows.mean())\n u_averages.columns = ['U_avg']\n u_averages['key'] = u_averages.index\n\n v_series = pd.Series(wind_df['adj_v'])\n v_windows = v_series.rolling(window_size)\n v_averages = pd.DataFrame(v_windows.mean())\n v_averages.columns = ['V_avg']\n v_averages['key'] = v_averages.index\n\n w_series = pd.Series(wind_df['W'])\n w_windows = w_series.rolling(window_size)\n w_averages = pd.DataFrame(w_windows.mean())\n w_averages.columns = ['W_avg']\n w_averages['key'] = w_averages.index\n\n vw_df = w_averages.set_index('key').join(v_averages.set_index('key'))\n vw_df['key'] = vw_df.index\n uvw_df = vw_df.set_index('key').join(u_averages.set_index('key'))\n uvw_df['key'] = uvw_df.index\n wind_df2 = wind_df.copy()\n 
wind_df2['key'] = wind_df2.index\n wind_df = uvw_df.set_index('key').join(wind_df2.set_index('key'))\n\n wind_df['r_avg'] = wind_df.apply(lambda row: np.sqrt(row['U_avg'] ** 2 + row['V_avg'] ** 2), axis=1)\n wind_df['theta_avg'] = wind_df.apply(lambda row: 0 if row.V_avg == 0 else np.arctan(-row['U_avg'] / row['V_avg']), axis=1)\n # wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df3 = wind_df[wind_df.CH4.notnull()].drop(columns=\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG',\n 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4', 'R'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3['R'] = wind_df3.loc[:, 'shift_R']\n wind_df3 = wind_df3.drop(['shift_CH4', 'shift_R'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df4 = wind_df3.loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind',\n 'phi', 'raw_CH4', 'raw_R', 'U_avg', 'V_avg', 'W_avg', 'r_avg', 'theta_avg', 'distance', 'odometer']]\n\n # wind_df7 = add_odometer(wind_df4,'LAT','LONG')\n\n # wind_df4 = wind_df7.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n wind_df4 = wind_df5.copy()\n wind_df4 = wind_df5.copy()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def process_raw_data_what(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import pandas as pd\n from datetime import datetime\n import os,gzip,csv,sys\n from numpy import pi\n import numpy as np\n radians = False\n\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename, 'r')\n else:\n f = open(xDir + \"/\" + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process\n # if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n # xdat = str(xFilename[len(xCar)+1:len(xCar) + 9])\n\n # fnOut = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") 
+ \"_dat.csv\" #set CSV output for raw data\n # fnLog = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_log.csv\" #output for logfile\n\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(1).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]), int(dtime[14:16]),\n int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n # fOut = open(fnOut, 'w')\n # fOut.write(sOutHeader)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # epoch = dateob.strftime('%s.%f')\n # dtime = int(dateob.strftime('%Y%m%d%H%M%S'))\n\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #seconds = fdate.strftime('%s.%f')\n seconds = float(dt_to_epoch(fdate)) * 1e-3\n\n if 1 == 2: #sys.platform.startswith('win'):\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n int(pd.to_numeric(dateob.strftime('%S.%f')))) + ',' + str(\n pd.to_numeric(dateob.strftime('%f')) * 1000) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(',') + str(\n lstS[14])\n if 1==1:\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n str(seconds)[:10]) + ',' + str(int(pd.to_numeric(str(seconds)[11:]) * 1000)) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(',') + str(\n lstS[14])\n if float(seconds) >= (float(firsttime) + (60 * float(initialTimeBack))):\n fOut.write(csvWrite)\n del (seconds)\n del (csvWrite)\n\n xCntObs += 1\n\n # sOut = str(gZIP) + \",\" + str(f) + \",\" + str(xCntObs) + \",\" + str(xCntGoodValues) + \"\\n\"\n # fLog.write(sOut)\n infOut.write(str(xFilename) + '\\n')\n\n fOut.close()\n 
fLog.close()\n infOut.close()\n\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda row: calc_velocity(row['timediff'], row['distance']), axis=1)\n wind_df['U_cor'] = wind_df.apply(lambda row: row['U'] + row['VELOCITY'], axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3 = wind_df3.drop(['shift_CH4'], axis=1).loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind', 'phi', 'raw_CH4',\n 'distance']]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df4 = wind_df3.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df4 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :].copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n wind_df4 = wind_df5.copy()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def load(self):\n #print self.fileInfo.name\n progress = self.progress\n filePath = 
os.path.join(self.fileInfo.dir,self.fileInfo.name)\n self.fileSize = os.path.getsize(filePath)\n #--Localize\n cells = self.cells\n records = self.records\n canSave = self.canSave\n skipObjRecords = self.skipObjRecords\n contTypes = set(['CREC','CNTC','NPCC'])\n levTypes = set(('LEVC','LEVI'))\n debrisIds = self.debrisIds\n debrisTypes = set(debrisIds.keys())\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n if not canSave: del self.tes3.others[:]\n #--Progress info\n progress = self.progress\n progress(0.0,'Loading '+self.fileInfo.name)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #print \"%s [%d]\" % (name,size)\n #--CELL?\n if name == 'CELL':\n record = Cell(name,size,delFlag,recFlag,ins,0,skipObjRecords)\n cells.append(record)\n if canSave: records.append(record)\n #--Contents\n elif canSave and name in contTypes:\n if name == 'CREC':\n record = Crec(name,size,delFlag,recFlag,ins,True)\n elif name == 'CNTC':\n record = Cntc(name,size,delFlag,recFlag,ins,True)\n else:\n record = Npcc(name,size,delFlag,recFlag,ins,True)\n self.conts.append(record)\n self.conts_id[record.getId()] = record\n records.append(record)\n #--File Map\n elif name == 'FMAP':\n record = Fmap(name,size,delFlag,recFlag,ins)\n self.fmap = record\n records.append(record)\n #--Landscapes\n elif name == 'LAND':\n record = Land(name,size,delFlag,recFlag,ins)\n self.lands[record.getId()] = record\n records.append(record)\n #--Scripts\n elif canSave and name == 'SCPT':\n record = Scpt(name,size,delFlag,recFlag,ins,True)\n records.append(record)\n if record.getRef():\n self.refs_scpt[record] = record.getRef()\n #--Save debris info?\n elif name in debrisTypes:\n record = Record(name,size,delFlag,recFlag,ins)\n id = record.getId()\n if id:\n debrisIds[name].append(id.lower())\n if canSave:\n records.append(record)\n #--Skip Non-cell?\n elif not canSave:\n ins.seek(size,1,name)\n #--Keep non-cell?\n else:\n records.append(Record(name,size,delFlag,recFlag,ins))\n #--Done Reading\n ins.close()\n #--Analyze Cells\n cntCells = 0\n progress.setMax(len(self.cells))\n for cell in self.cells:\n cell.load(None,1)\n self.cells_id[cell.getId()] = cell\n if not canSave:\n cell.data = None #--Free some memory\n #--Progress\n cntCells += 1\n progress(cntCells)\n #--Scripts\n if self.refs_scpt:\n self.updateScptRefs()", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n 
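# Self-contained check (synthetic data, not the scanner file above) of the
# Fortran-order reshape used for the .aps/.a3d branches: the flat buffer is assumed
# to be laid out column-major, so order='F' is required to recover the volume.
import numpy as np

nx, ny, nt = 4, 3, 2
vol = np.arange(nx * ny * nt, dtype=np.float32).reshape(nx, ny, nt)
flat = vol.flatten(order='F')                     # assumed on-disk ordering
restored = flat.reshape(nx, ny, nt, order='F')
assert np.array_equal(vol, restored)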
elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def __init__(self, path: str):\n self._path = path\n self._fp = gzip.open(self._path, mode=\"r\")", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def _load(self):\n # Extract the ASCII header (5 first lines)\n with open(self._xst_bin, 'rb') as f:\n header = list(islice(f, 0, 5))\n assert header[0] == b'HeaderStart\\n',\\\n 'Wrong header start'\n assert header[-1] == b'HeaderStop\\n',\\\n 'Wrong header stop'\n header = [s.decode('utf-8') for s in header]\n hd_size = sum([len(s) for s in header])\n\n # Parse informations into a metadata dictionnary\n keys = ['freq', 'ma', 'accu']\n search = ['Freq.List', 'Mr.List', 'accumulation']\n types = ['float64', 'int', 'int']\n for key, word, typ in zip(keys, search, types):\n for h in header:\n if word in h:\n self.meta[key] = np.array(\n h.split('=')[1].split(','),\n dtype=typ\n )\n\n # Deduce the dtype for decoding\n n_ma = self.meta['ma'].size\n n_sb = self.meta['freq'].size\n dtype = np.dtype(\n [('jd', 'float64'),\n ('data', 'complex64', (n_sb, n_ma*n_ma*2 + n_ma))]\n )\n\n # Decoding the binary file\n tmp = np.memmap(\n filename=self._xst_bin,\n dtype='int8',\n mode='r',\n offset=hd_size\n )\n decoded = tmp.view(dtype)\n\n self.data = decoded['data'] / self.meta['accu']\n self.time = Time(decoded['jd'], format='jd', precision=0)\n\n return", "def __init__(self, input_stream, level=9):\n super(Gzip, self).__init__(input_stream)\n\n self._level = level", "def load_data(path, rng, epoch, batch_size, x_,y_):\n #global x_,t_,y_,\n #global first_report2 \n #first_report2 = True\n start_time = time()\n v,p,skeleton_feature,l = load_gzip(path)\n v = v[:,:,:res_shape[2]]\n res_shape[0] = v.shape[0]\n v_new = empty(res_shape,dtype=\"uint8\")\n\n for i in xrange(v.shape[0]): #batch\n if p[i] < 10: p[i] = 100\n ofs = p[i]*ratio\n mid = v.shape[-1]/2.\n sli = None\n if ofs < mid:\n start = int(round(mid-ofs))\n end = int(round(mid+ofs))\n 
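# start/end span a window of width 2*ofs centred on the frame midpoint mid; the slice
# built from them just below is applied to both image axes to crop the hand region
# before it is resized to h x h.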
sli = slice(start,end)\n\n for j in xrange(v.shape[2]): #maps\n for k in xrange(v.shape[3]): #frames\n #body\n img = v[i,0,j,k]\n img = cut_img(img,5)\n img = misc.imresize(img,(h,h))\n # if j==0: img = 255-misc.imfilter(img,\"contour\")\n v_new[i,0,j,k] = img\n\n #hand\n img = v[i,1,j,k]\n img = img[sli,sli]\n img = misc.imresize(img,(h,h))\n v_new[i,1,j,k] = img\n\n vid, lbl = v_new,l\n\n #if epoch==0: print \"get in\",str(time()-start_time)[:3]+\"s\",\n # shuffle data\n ind = rng.permutation(l.shape[0])\n ind = ind[:batch_size]\n vid = vid[:,:,:,:4,:,:]\n vid, skeleton_feature, lbl = vid[ind].astype(floatX), skeleton_feature[ind].astype(floatX),lbl[ind].astype(floatX)\n #vid, skeleton_feature, lbl = vid.astype(floatX), skeleton_feature.astype(floatX),lbl.astype(floatX)\n\n # vid = vid/(255./(scaler*2.))-scaler\n #traj = traj/(255./(scaler_traj*2.))-scaler_traj\n # traj = traj/(255./5.)\n\n # Wudi already made labels start from 0\n #lbl -= 1 \n\n #if first_report2:\n # print \"data range:\",vid.min(),vid.max()\n # print \"traj range:\",skeleton_feature.min(),skeleton_feature.max()\n # print \"lbl range:\",lbl.min(),lbl.max()\n # first_report2 = False\n\n # set value\n x_.set_value(vid, borrow=True)\n #t_.set_value(skeleton_feature, borrow=True)\n y_.set_value(lbl, borrow=True)", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def read_texture_file(filename):\n \n # Deal with compressed files.\n import os\n if (os.path.splitext(filename)[1] == '.gz'):\n import gzip\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'r')\n\n # Stuff everything into a dict and a list\n # for now. 
Sort this out later (we will probably \n # want to have objects at some point\n header_data = {}\n particles = []\n\n header_lines = 5\n particle_header_lines = 9\n \n for line in f:\n if header_lines == 5:\n header_data['theia_lun'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 4:\n header_data['npartsallo'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 3:\n header_data['npartsused'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 2:\n header_data['n_expected_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 1:\n header_data['nseen_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 0:\n if particle_header_lines == 9:\n this_particle = {}\n this_particle['process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 8:\n this_particle['particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 7:\n this_particle['old_particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 6:\n this_particle['old_process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 5:\n this_particle['particle_class'] = line.strip()\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 4:\n this_particle['particle_position'] = np.array(\n [line[0:12], line[12:24], line[24:36]])\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 3:\n this_particle['idata_count'] = int(line)\n if this_particle['idata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particle_header_lines = particle_header_lines - 2\n elif particle_header_lines == 2:\n this_particle['particle_idata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+12] for i in xrange(0, len(line.rstrip('\\r\\n')), 12)]\n )\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 1:\n this_particle['rdata_count'] = int(line)\n if this_particle['rdata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particles.append(this_particle)\n particle_header_lines = 9\n elif particle_header_lines == 0:\n this_particle['particle_rdata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+14] for i in xrange(0, len(line.rstrip('\\r\\n')), 14)]\n )\n particles.append(this_particle)\n particle_header_lines = 9\n f.close()\n\n return header_data, particles", "def get_inf_sizes(song_or_key):\n if isinstance(song_or_key, basestring):\n k = song_or_key\n else:\n k = song_key(song_or_key)\n path = os.path.join(LYRICS_DIR, k+'.txt.gz.infgen')\n with open(path) as f:\n return parse_infgen.parse_ratio(f)", "def extract_data(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * 10000 * 1)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (255 / 2.0)) / 255\n data = data.reshape(10000, 28, 28, 1)\n return data", "def LoadMMMetaData(filename):\r\n## print \"loading MM Metadata\"\r\n file = open(filename,'r')\r\n data = file.read()\r\n file.close()\r\n data = data.replace(\"false\",\"False\")\r\n data = data.replace(\"true\",\"True\")\r\n data = data.replace(\"null\",\"0\")\r\n f = eval(str(data))\r\n tiles = []\r\n for i in f.keys():\r\n if i != \"Summary\":\r\n tiles.append(i)\r\n xpos = f[tiles[0]][\"XPositionUm\"]\r\n ypos = 
f[tiles[0]][\"YPositionUm\"]\r\n zpos = f[tiles[0]][\"ZPositionUm\"] \r\n ScaleFactorX= f[\"Summary\"][\"PixelSize_um\"]\r\n ScaleFactorY= ScaleFactorX\r\n Width=f[\"Summary\"][\"Width\"]\r\n Height=f[\"Summary\"][\"Height\"]\r\n extent=[xpos-(Width/2)*ScaleFactorX,xpos+(Width/2)*ScaleFactorX,\\\r\n ypos-(Height/2)*ScaleFactorY,ypos+(Height/2)*ScaleFactorY] #FOR NOW\r\n\r\n #WHY WAS IT + THEN - FOR Y??\r\n return extent,zpos", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def _read_member_header(self):\n header = _read_gzip_header(self._fileobj)\n offset = self._fileobj.tell()\n if \"RA\" not in header[\"extra_field\"]:\n try:\n if self._fileobj.seekable():\n self.stream.seek(0)\n except AttributeError:\n pass\n raise IOError(\"Not an idzip file: %r\" % self.name)\n\n dictzip_field = _parse_dictzip_field(header[\"extra_field\"][\"RA\"])\n num_member_chunks = len(dictzip_field[\"zlengths\"])\n\n start_chunk_index = len(self._chunks)\n for zlen in dictzip_field[\"zlengths\"]:\n self._chunks.append((offset, zlen))\n offset += zlen\n self._last_zstream_end = offset\n\n chlen = dictzip_field[\"chlen\"]\n sure_size = chlen * (num_member_chunks - 1)\n self._add_member(chlen, start_chunk_index, sure_size)", "def test_gzip_file_no_extension(self):\n # Write the data to a file\n temp_file = tempfile.NamedTemporaryFile()\n with gzip.open(temp_file.name, 'wb') as out:\n for item in self.data:\n serialzed = json.dumps(item).encode()\n out.write(serialzed + b'\\n')\n\n # Load from file, ensure it is correct\n actual_data = []\n with JsonlReader(temp_file.name) as f:\n for item in f:\n actual_data.append(item)\n self.assertEqual(self.data, actual_data)", "def _read_data(self):", "def process_raw_data_eng(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack,\n shift, maxSpeed='45', minSpeed='2'):\n import pandas as pd\n from datetime import datetime\n import os\n import gzip\n import sys\n from math import floor\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the (.txt) data with specific headers --> need to change this\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge 
(V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T0 (degC),T5 (degC), Laser PID Readout,Det PID Readout,win0Fit0,win0Fit1,win0Fit3,win1Fit4,win0Fit5,win0Fit6,win0Fit7,win0Fit8,win0Fit9,win1Fit0,win1Fit1,win1Fit2,win1Fit3,win1Fit4,win1Fit5,win1Fit6,Det Bkgd,Ramp Ampl,CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Battery T (degC),FET T (degC),GPS Time,Latitude,Longitude\"\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T0 (degC),T5 (degC),Laser PID Readout,Det PID Readout,win0Fit0,win0Fit1,win0Fit2,win0Fit3,win0Fit4,win0Fit5,win0Fit6,win0Fit7,win0Fit8,win0Fit9,win1Fit0,win1Fit1,win1Fit2,win1Fit3,win1Fit4,win1Fit5,win1Fit6,Det Bkgd,Ramp Ampl,CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Battery T (degC),FET T (degC),GPS Time,Latitude,Longitude\"\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n\n headerNames = sHeader.split(',')\n GPS_loc = 37 # Where the GPS data is located (in the row)\n\n infoHeader = \"FILENAME\\n\"\n\n # gZIP is indicating if it is a ZIP file (I don't think I've written this in)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,'r')\n else:\n #f = open(xDir + \"/\" + xFilename, 'r')\n f = open(xDir + xFilename, 'r')\n\n ### FIGURING OUT DATE FROM FILENAME (WILL NEED TO CHANGE THIS IF DIFFERENT FILENAME)\n xdat = str('20') + xFilename[11:17]\n\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n\n # FINDING THE FIRST TIME NOTED\n firsttime = int(float(open(xDir + xFilename).readlines().pop(1).split(',')[37][:-4]))\n\n ## MAKING TEMPORARY FILE (FOR IF LATER YOU HAVE TO ADD A DATE)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n # 3fOut = open(fnOutTemp, 'w')\n # fOut.write(sOutHeader)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # READ IN THE LINES\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n\n if bGood:\n lstS = row.split(',')\n gpstime = lstS[GPS_loc]\n dtime = lstS[0]\n dt = lstS[1]\n time_dt = lstS[2]\n epoch = lstS[3]\n # nano = lstS[4]\n\n gps_time = lstS[37]\n dateob = datetime.fromtimestamp(int(gps_time[:-4]))\n nano = gps_time[-4:]\n\n # dateob = datetime(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]),int(time_dt[0:2]),int(time_dt[3:5]),int(time_dt[6:8]),int(float(nano)*1e-9))\n\n dtime = int(dateob.strftime('%Y%m%d%H%M%S'))\n # Date = dateob.strftime('%m%/%d/%Y')\n Date = dateob.strftime('%Y-%m-%d')\n\n GPS_Time = dateob.strftime('%H%:%M:%S')\n seconds = floor(float(gpstime))\n nano = dateob.strftime('%f')\n\n # dateob = datetime(int(dtime[6:10]),int(dtime[0:2]),int(dtime[3:5]),int(dtime[11:13]),int(dtime[14:16]),int(dtime[17:19]),int(float(dtime[19:23])*1000000))\n # epoch = dateob.strftime('%s.%f')\n\n # THIS IS USING THE CSU METHOD. 
IN OUR METHOD, WE DO THE SPEED LATER IN THE ALGORITHM.\n\n # # if RSSI of bottome sensor is below 50 if float(lstS[28]) < xMinRSSI: fLog.write(\"RSSI (Bottom)\n # value less than 50: \"+ str(lstS[28]) + \"\\n\") continue # Car Speed if float(lstS[12]) >\n # xMaxCarSpeed: fLog.write(\"Car speed of \" + str(float(lstS[12])) + \" exceeds max threshold of: \" +\n # str(xMaxCarSpeed) + \"\\n\") continue if float(lstS[12]) < xMinCarSpeed: fLog.write(\"Car speed of \" +\n # str(float(lstS[12])) + \" less than min threshold of: \" + str(xMinCarSpeed) + \"\\n\") continue\n\n # For some reason it is producing its longitude in positive number while USA is located at negative longitude\n # thats why we do -1 * float(lstS[7])\n\n # fix this when we have stuffs\n\n # s1 = str(lstS[1])+\",\"+str(lstS[2])+\",\"+str(lstS[3])+\",\"+str(lstS[4])+\",\"+str(lstS[6])+\",\"\n # s1 += str(-1 * float(lstS[7]))+\",\"+str(lstS[12])+\",\"+str(lstS[14])+\",\"+str(lstS[15])+\",\"+str(lstS[16])+\",\"+str(lstS[25])+\",\"\n # s1 += str(lstS[28])+\",\"+str(lstS[38])+\",\"+str(lstS[41])+\"\\n\"\n\n ## choosing what to write in the .csv\n\n # if sys.platform.startswith('win'):\n # ## DATE, TIME, SECONDS,NANOSECONDS\n # csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n # float(pd.to_numeric(dateob.strftime('%S.%f')))) + ',' + str(\n # pd.to_numeric(dateob.strftime('%f')) * 1000) + str(',')\n # ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n # csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n # lstS[26]) + ',' + str('0') + ',' + str(lstS[26]) + ','\n # ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n # csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n # lstS[26]) + ',' + str(lstS[27]) + ',' + str(lstS[28]) + ','\n # # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n # csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(\n # lstS[32]) + ',' + str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(\n # lstS[39])\n\n # =============================================================================\n # if not sys.platform.startswith('win'):\n # ## DATE, TIME, SECONDS,NANOSECONDS\n # csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str((int(floor(pd.to_numeric(dateob.strftime('%s.%f')))))) + ',' + str((pd.to_numeric(dateob.strftime('%f')) *1000)) + str(',')\n # ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n # csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(lstS[26]) + ',' + str('0') + ','+ str(lstS[26]) + ','\n # ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n # csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(lstS[26]) + ',' + str(lstS[27]) +',' + str(lstS[28]) + ','\n # # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n # csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(lstS[32]) + ','+ str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(lstS[39][:-1]) + str('\\n')\n # #fOut.write('\\n')\n # fOut.write(csvWrite)\n # #fOut.write('\\n')\n #\n # =============================================================================\n # if not sys.platform.startswith('win'):\n if 1 == 1:\n ## DATE, TIME, SECONDS,NANOSECONDS\n csvWrite = str(Date) + ',' + str(GPS_Time) + ',' + str(seconds) + ',' + str(nano) + str(',')\n ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n csvWrite += str('50') + ',' + str('0') + 
',' + str('0') + ',' + str('0') + ',' + str(\n lstS[26]) + ',' + str('0') + ',' + str(lstS[26]) + ','\n ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[26]) + ',' + str(lstS[27]) + ',' + str(lstS[28]) + ','\n # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(\n lstS[32]) + ',' + str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(\n lstS[39])\n # fOut.write('\\n')\n\n #### REMOVING THE FIRST BIT OF DATA (if you need to )\n if seconds >= (firsttime + (60 * float(initialTimeBack))):\n fOut.write(csvWrite)\n\n del (csvWrite)\n # xCntGoodValues += 1\n\n xCntObs += 1\n\n # sOut = str(gZIP) + \",\" + str(f) + \",\" + str(xCntObs) + \",\" + str(xCntGoodValues) + \"\\n\"\n # fLog.write(sOut)\n\n infOut.write(str(xFilename) + '\\n')\n\n fOut.close()\n fLog.close()\n infOut.close()\n\n # xDate = dateob.strftime(\"%Y%m%d\")\n\n # newfnOut = xOutDir + xCar + \"_\" + xDate + \"_dat.csv\" #set CSV output for raw data\n # newfnLog = xOutDir + xCar + \"_\" + xDate + \"_log.csv\"\n\n # print(xCar + \"\\t\" + xdat + \"\\t\" + fnOut[-22:] + \"\\t\" + str(xCntObs) + \"\\t\" + str(xCntGoodValues) + \"\\t\" + str(\n # gZIP))\n\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n\n import numpy as np\n radians = False\n wind_df = pd.read_csv(fnOutTemp)\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'], axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda row: calc_velocity(row['timediff'], row['distance']), axis=1)\n wind_df['U_cor'] = wind_df.apply(lambda row: row['U'] + row['VELOCITY'], axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df3 = 
wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'distance', 'timediff', 'uncor_theta', 'CH4'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3 = wind_df3.drop(['shift_CH4'], axis=1).loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind', 'phi', 'raw_CH4']]\n wind_df4 = add_odometer(wind_df3.loc[wind_df3.totalWind.notnull(), :], 'LAT', 'LONG')\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df4 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :].copy().drop_duplicates()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def readSrc_bySens(self):\n dctn = self.srcData\n dctn['header'] = []\n # dctn['header'] = ['%% This dictionary created by alog_manip.alogrd_dict']\n for msg in self.srcFile: # broken by lines, are now strings\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n dctn['header'].append(msg) # assume all comments occur at beginning of file\n else:\n msg = msg.split()\n if msg[2] not in dctn: # none from this gSource yet\n dctn[msg[2]] = {}\n if msg[1] not in dctn[msg[2]]: # none in this gSource from this zMeas yet\n dctn[msg[2]][msg[1]] = {}\n try:\n dctn[msg[2]][msg[1]][float(msg[0])] = float(msg[3]) # double\n except ValueError: # it's a string\n # dimc = msg[3].split(']')[0].split('x')[1] # cols\n # dimr = msg[3].split(']')[0].split('x')[0][1:] # rows\n value_s = msg[3].split(']')[1][1:-1].split(',')\n dctn[msg[2]][msg[1]][float(msg[0])] = [float(i) for i in value_s]\n except IndexError: # it's blank\n dctn[msg[2]][msg[1]][float(msg[0])] = None # nan better?", "def process_download_other_old(self, data, meta_file_name):\n block_size = 1024\n # content-length in bytes\n self.data_len = float(data.info().get('Content-length', None))\n config_pytomo.LOG.debug('Content-length: %s' % self.data_len)\n #meta_file = open(meta_file_name, 'ab')\n #meta_file = open(meta_file_name, 'ab+')\n tries = 0\n accumulated_playback = 0\n buff_state_tracker = False\n accumulated_buffer = 0.0\n initial_data = 0\n initial_rate = 0\n byte_counter = 0\n self.state = INITIAL_BUFFERING_STATE\n start = time.time()\n while True:\n # Download and write\n before = time.time()\n if not ((before - start) > self.download_time):\n # read in bytes\n data_block = data.read(block_size)\n else:\n break\n if (not self.encoding_rate\n and tries <= config_pytomo.MAX_NB_TRIES_ENCODING):\n self.compute_encoding_rate(meta_file_name)\n tries += 1\n data_block_len = len(data_block)\n if data_block_len == 0:\n break\n after = time.time()\n self.compute_interruptions(data_block_len, after)\n if self.state == PLAYING_STATE:\n accumulated_playback += (after - before)\n if not buff_state_tracker:\n initial_duration = accumulated_buffer\n try:\n initial_rate = (initial_data * 8 / initial_duration /\n 1000)\n except ZeroDivisionError:\n initial_rate = 0\n buff_state_tracker = True\n elif self.state == BUFFERING_STATE:\n accumulated_buffer += (after - before)\n if not buff_state_tracker:\n initial_data += data_block_len\n else:\n 
config_pytomo.LOG.error(\"Unexpected state case\")\n break\n byte_counter += data_block_len\n block_size = self.best_block_size(after - before, data_block_len)\n instant_thp = (8e-3 * data_block_len / (after - before)\n if (after - before) != 0 else None)\n self.max_instant_thp = max(self.max_instant_thp, instant_thp)\n if config_pytomo.LOG_LEVEL == config_pytomo.DEBUG:\n # Progress message\n progress_stats = {\n 'percent_str': self.calc_percent(self._total_bytes,\n self.data_len),\n 'data_len_str': self.format_bytes(self.data_len),\n 'eta_str': self.calc_eta(start, time.time(), self.data_len,\n self._total_bytes),\n 'speed_str': self.calc_speed(start, time.time(),\n self._total_bytes),\n # in order to avoid None convertion to float in\n # report_progress and still have information\n 'instant_thp': str(instant_thp),\n 'byte_counter': self._total_bytes,\n 'current_buffer': self.current_buffer,\n }\n self.report_progress(progress_stats)\n self.set_total_bytes(byte_counter)\n self.accumulated_playback = accumulated_playback\n self.accumulated_buffer = accumulated_buffer\n self.initial_data = initial_data\n self.initial_rate = initial_rate\n return after - start", "def load(datastream):", "def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices", "def _ReadMemberCompressedData(self, file_object):\n zlib_decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n compressed_data = file_object.read(self._BUFFER_SIZE)\n while compressed_data:\n data, compressed_data = self._ReadCompressedData(\n zlib_decompressor, compressed_data)\n if compressed_data:\n file_object.seek(-len(compressed_data), os.SEEK_CUR)\n\n if not data:\n break\n\n compressed_data = file_object.read(self._BUFFER_SIZE)", "def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size", "def debianize( 
strFilename ):\n \n #~ data = gzip.GzipFile( strFilename ).read();\n #~ print data;\n #~ return;\n \n #~ data = gzip.open( strFilename ).read();\n #~ print data;\n #~ return; \n \n #~ uncompressedData = bz2.BZ2File(strFilename).read()\n #~ print str(uncompressedData)\n #~ return;\n \n #~ file = open( strFilename, 'rb' );\n #~ data = file.read();\n #~ file.close();\n #~ print debug.dumpHexa( data );\n \n #~ ar = tarfile.open(strFilename, 'r:*')\n #~ for item in ar:\n #~ print( str(item) );\n #~ print( \"%s:\" % item.name );\n #~ #print debug.dumpHexa(item.buf);\n #~ #print zlib.decompress(item.buf)\n #~ #print zlib.decompress(ar.extractfile(item).read())\n #~ data = ar.extractfile(item.name).read()\n #~ print data # works !\n #~ ar.close() \n #~ return;\n \n fileLists = [];\n file = open( strFilename );\n data = file.read();\n file.close();\n \n print( \"data len: %d\" % len( data ) );\n\n nDataCompressedOffset = 0; # 132\n\n # works fine on toto.gz\n #~ f = gzip.open(strFilename, 'rb')\n #~ file_content = f.read()\n #~ print file_content\n #~ f.close() \n \n #~ decompressor = bz2.BZ2Decompressor();\n #~ uncompressed = decompressor.decompress(data[nDataCompressedOffset:]);\n \n #~ uncompressed = zlib.decompress(data[nDataCompressedOffset:]);\n \n uncompressed = decompress( data );\n print( \"uncompressed: %s\" % str( uncompressed ) );", "def read_zp(file):\n with open(file) as f_in:\n head = f_in.readline()\n units = f_in.readline()\n for line in f_in:\n try:\n zpWave[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[1])\n zpF0[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[2])\n \n except NameError:\n zpWave = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[1])}\n zpF0 = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[2])}\n \n return zpWave, zpF0", "def _load_bgzf_block(handle):\n\n # Pull in the BGZF block header information\n header, _ = _bgzf_metaheader(handle)\n XLEN = header[-4]\n BSIZE = struct.unpack('<H', handle.read(2))[0]\n\n # Expose the compressed data\n d_size = BSIZE - XLEN - 19\n d_obj = zlib.decompressobj(-15)\n data = d_obj.decompress(handle.read(d_size)) + d_obj.flush()\n\n # Checking data integrity\n CRC32, ISIZE = unpack_gzip_integrity(handle.read(_integrity_size))\n deflated_crc = zlib.crc32(data)\n if deflated_crc < 0:\n deflated_crc = deflated_crc % (1 << 32)\n if CRC32 != deflated_crc:\n raise ValueError('CRCs are not equal: is {}, not {}'.format(CRC32, deflated_crc))\n if ISIZE != len(data):\n raise ValueError('unequal uncompressed data size')\n\n return BSIZE + 1, data", "def __init__(self, fits_file, ext=0):", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if 
self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def extractOldALFOSCHeader(file):\n\n try:\n\n hdulist = pyfits.open(file)\n hdulist.close() \n\n # Extract primary header unit\n ph = extractHDU(file,0)\n\n # Form a proper timestamp from a float type UT \n ut = requireValidFloat('UT',ph) \n hh = int(ut)\n mm = int((ut-hh)*60)\n ss = int((((ut-hh)*60)-mm)*60)\n timestamp = \"%02d:%02d:%02d\" % (hh,mm,ss)\n date_obs = requireValidString('DATE-OBS', ph)\n\n fitsheader = {\n 'imagetyp': ph.get('IMAGETYP', 'na').strip() or 'na',\n 'exptime'\t: requireValidFloat('EXPTIME',ph),\t\t\t\n 'azimuth'\t: '0.00', \t\n 'austatus': 'na',\t\n 'telfocus': requireValidInt('TELFOCUS', ph),\n 'gain'\t: '0.726',\n 'alfltid'\t: requireValidInt('FILTID', ph),\n 'alfltnm'\t: requireValidString('FILTER', ph),\t \t\n 'fafltid'\t: requireValidInt('AFILTID', ph),\n 'fafltnm'\t: requireValidString('AFILTER', ph),\n 'fbfltid'\t: requireValidInt('BFILTID', ph),\n 'fbfltnm'\t: requireValidString('BFILTER', ph),\t\t\n 'rotpos' : requireValidFloat('ROTPOS',ph),\n 'apertur' : requireValidString('APERTUR', ph),\n 'ra' : '%.2f' % requireValidFloat('RA',ph),\n 'decl' : '%.2f' % requireValidFloat('DEC',ph) \n\t\n }\n \n fitsheader['dateobs'] = \"%sT%s\" % (date_obs, timestamp)\n\n # Calculate telescope altitude from airmass\n airmass = requireValidFloat('AIRMASS',ph)\n fitsheader['telalt'] = '%.2f' % (90 - degrees(pi/2 - asin(1/airmass))) \n\n # Calculate pixel scale\n cd1_1 = requireValidInt('CDELT1', ph)\n fitsheader['pscale'] = str(cd1_1 * 0.19)\n\n fitsheader['instrume'] = 'alfosc'\n\n if (fitsheader['exptime'] > 1.0) and (requireValidString('GRISM', ph) == 'Open_(Lyot)'):\n fitsheader['imaging'] = 1\n else:\n fitsheader['imaging'] = 0\t\n\n fitsheader['keys'] = ['dateobs','telalt','azimuth','rotpos','ra','decl','telfocus','pscale','gain',\n\t\t'apertur','alfltid','alfltnm','fafltid','fafltnm','fbfltid','fbfltnm',\n\t\t'imagetyp','exptime','austatus']\n\n except HeaderException, e:\n return ['ERROR']\n\n\n return fitsheader", "def __init__(self, path, max_cache=50000): # 50kb\n self.spindle = 0\n self.cache = BytesIO()\n self.max_cache = max_cache\n self.f = open(path, 'ab+')", "def MTread(fn,slMode='s',leng=0,start=0, wav_out=None, outpath='Default Folder',header=None):\n #check variables\n try:\n fn\n except NameError:\n raise Warning('Filename fn needs to be defined!')\n \n try:\n slMode\n except NameError:\n warnings.warn('slMode - the start and length mode was not defined...defaulting to s for seconds')\n slMode = 's'\n if slMode.upper() not in ['S','P']:\n warnings.warn('slMode - the start and length mode has to be either s for seconds or p for points...defaulting to s for seconds')\n slMode = 's'\n \n try:\n leng\n except NameError:\n warnings.warn('leng - the length of the data to be read in was not defined...defaulting to leng = 0, reading in all data')\n leng = 0\n if type(leng) != int:\n warnings.warn('leng - the length of the data has to be an integer...defaulting to leng = 0, reading in all data')\n leng = 0\n \n try:\n start\n except NameError:\n warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n if type(leng) != int:\n 
warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n \n # Create empty dictionaries\n HEADER = {}\n INFO = {}\n \n if leng==0: leng = np.inf\n \n #check if auxiliary data\n vcode = path.basename(fn)[2]\n aux = True if vcode in ['I','J','K','P','T','X','Y','Z'] else False\n \n #open the binary file and start reading\n with open(fn, \"rb\") as f:\n magicstring = f.read(8).decode('ascii').strip().strip('\\x00')\n if magicstring == 'DATA':\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Found Data...')\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Header information...')\n HEADER['totalhdrs'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['abbrev '] = f.read(8).decode('ascii').strip().strip('\\x00')\n HEADER['stationcode'] = f.read(3).decode('ascii').strip().strip('\\x00')\n HEADER['title'] = f.read(82).decode('ascii').strip().strip('\\x00')\n HEADER['month'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['day'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['year'] = (f.read(5).decode('ascii').strip().strip('\\x00'))\n HEADER['hours'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['minutes'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['seconds'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['msec'] = (f.read(4).decode('ascii').strip().strip('\\x00'))\n HEADER['sampling_period'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['samplebits'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['wordsize'] = int(f.read(2).decode('ascii').strip().strip('\\x00'))\n \n #if HEADER['wordsize'] < HEADER['samplebits']/8:\n #warnings.warn('The samplebits field Does not fit the wordsize field. --- This file may be bad. ')\n HEADER['typemark'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['swapping'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['signing'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['caltype'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['calmin'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calmax'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calunits'] = f.read(40).decode('ascii').strip().strip('\\x00')\n HEADER['recordsize'] = int(f.read(6).decode('ascii').strip().strip('\\x00'))\n HEADER['sourcevers'] = f.read(9).decode('ascii').strip().strip('\\x00')\n HEADER['sourcesn'] = f.read(16).decode('ascii').strip().strip('\\x00')\n print(HEADER)\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Meta data...')\n INFO['filename'] = fn\n INFO['filesize'] = path.getsize(fn)\n INFO['srate'] = 1/HEADER['sampling_period']\n INFO['when'] = datetime.strptime(HEADER['year'] + '/' + HEADER['month'] + '/' + HEADER['day'] + ' ' + HEADER['hours'] + ':' + HEADER['minutes'] + ':' + HEADER['seconds'] + '.' 
+ HEADER['msec'],'%Y/%m/%d %H:%M:%S.%f')\n INFO['datenumber'] = date.toordinal(INFO['when'])\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Reading Data...')\n if slMode.upper() == 'P': # Start & Length specified in # Points (samples)\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start/INFO['srate'])\n INFO['datenumber'] = INFO['datenumber'] + (start/INFO['srate']/24/3600)\n else:\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start) # Corrected start time (with offset)\n INFO['datenumber'] = INFO['datenumber'] + start/24/3600\n \n if 'wordsize' in HEADER:\n if HEADER['wordsize'] == '':\n HEADER['wordsize'] = 2\n else:\n HEADER['wordsize'] = 2\n \n INFO['nsamp'] = int((INFO['filesize'] - 512 * HEADER['totalhdrs']) / HEADER['wordsize'])\n INFO['seconds'] = INFO['nsamp'] / INFO['srate']\n \n if leng > 0: # Only load data if it's been asked for.\n if any(x in HEADER['swapping'] for x in ['S','L','s','l']):\n mode = '<'\n else:\n mode = '>'\n \n status = 0\n if slMode.upper() == 'P': # specified start time in sample 'P'oints rather than time\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + int(start) * HEADER['wordsize']) # Skip by samples/points\n except:\n status = 1\n else:\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + round(start * INFO['srate'] * HEADER['wordsize'])) # skip by time (seconds)\n except:\n status = 1\n \n if status == 0: # If status is nonzero, we probably went past the end of the file.\n if HEADER['caltype'].upper() == 'F':\n if not any(x == HEADER['wordsize'] for x in [4,8]):\n f.close(f)\n #raise Warning('Invalid word size! Only valid Float sizes are four or eight bytes.')\n binType = 'float' + str(HEADER['wordsize'] * 8)\n else:\n binType = 'bit' + str(HEADER['wordsize'] * 8)\n if any(x in HEADER['signing'] for x in ['U','u']):\n binType = 'u' + binType\n \n \n if slMode.upper() == 'P':\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(leng)\n \n else:\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(int(leng*INFO['srate'])*2)\n if aux:\n fmt = '%c%iH' %(mode,len(fi)/2)\n else:\n fmt = '%c%ih' %(mode,len(fi)/2)\n p = unpack(fmt,fi)\n \n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n \n if (type(calmin) == float and type(calmax) == float and ((calmin + np.spacing(1)) < calmax) and HEADER['caltype'].upper() != 'F'):\n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n if HEADER['signing'].upper() == 'U':\n bitmin = 0\n bitmax = 2**HEADER['samplebits'] - 1\n else:\n bitmin = -(2**(HEADER['samplebits']-1))\n bitmax = (2**(HEADER['samplebits']-1)) - 1\n \n \n multiplier = (calmax - calmin) / (bitmax - bitmin)\n p = (np.array(p) - bitmin) * multiplier + calmin\n else:\n p = []# Output an empty matrix if requested data is beyond the length of the current file\n \n else:\n p = [] # Also output an empty matrix of zero length LENGTH input is requested (ie, only return header/info values)\n INFO['count'] = 0\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Returning data...')\n \n #check if it is a data or aux file\n \n if aux:\n p = pd.DataFrame({'Value':p})\n p['VarCode'] = vcode\n p['mission'] = HEADER['title'].split('-')[0] \n p['sampling_rate'] = HEADER['sampling_period']\n p['nSample'] = np.arange(1,p.shape[0]+1)\n p['start_time'] = pd.to_datetime(HEADER[\"year\"] + \"-\" + HEADER[\"month\"] + \"-\" + HEADER[\"day\"] + \" \" + HEADER[\"hours\"] + \":\" +\\\n HEADER[\"minutes\"] + \":\" + HEADER[\"seconds\"] + \".\" + HEADER[\"msec\"])\n p['sec_since_start'] = p['nSample'] * p['sampling_rate']\n p['Time'] = 
p['start_time'] + pd.to_timedelta(p['sec_since_start'], unit='s')\n return(p,HEADER,'aux')\n else:\n if wav_out != None:\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Saving wav file...' + HEADER['title'].split('-')[0] )\n if 'p':\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n outfn = outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.wav'\n sr = int(INFO['srate'])\n data = p\n write(outfn,int(sr), np.int16(data/(abs(data).max())*np.iinfo(np.int16).max))\n \n if header != None:\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n hh = pd.DataFrame.from_dict(HEADER, orient='index')\n hh.to_csv( outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.csv')\n if 'p':\n return p,HEADER,INFO", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def __init__(self, **kwargs):\n super(ImageExporter, self).__init__(**kwargs)\n # COMPRESS=PACKBITS\n # PIL: TIFF uncompressed, or Packbits, LZW, or JPEG compressed images. In the current version, PIL always writes uncompressed TIFF files\n # http://linfiniti.com/2011/05/gdal-efficiency-of-various-compression-algorithms/\n # predictor for 'DEFLATE' or 'LZW' : 1 or 2\n i_tiff_compression_predictor=2\n # zlevel for 'DEFLATE' : 1 to 9\n i_tiff_compression_zlevel=8\n self.jpg_quality=75\n self.tiff_compression=[]\n self._metadata = []\n if self.reader.metadata_input:\n self.metadata_input=self.reader.metadata_input\n self.tiff_compress = kwargs.get('tiff_compression', \"LZW\")\n self.tiff_compress =self.tiff_compress.upper()\n self.jpg_quality = kwargs.get('jpg_quality', self.jpg_quality)\n if self.jpg_quality < 1 or self.jpg_quality > 95:\n self.jpg_quality=75\n i_tiff_compression_predictor = kwargs.get('tiff_predictor', i_tiff_compression_predictor)\n if i_tiff_compression_predictor < 1 or i_tiff_compression_predictor > 2:\n i_tiff_compression_predictor=2\n i_tiff_compression_zlevel = kwargs.get('tiff_zlevel', i_tiff_compression_zlevel)\n if i_tiff_compression_zlevel < 1 or i_tiff_compression_zlevel > 9:\n i_tiff_compression_predictor=8\n if self.tiff_compress == \"PACKBITS\" :\n self.tiff_compression.append('COMPRESS=PACKBITS')\n elif self.tiff_compress == \"DEFLATE\":\n self.tiff_compression.append('COMPRESS=%s' % 'DEFLATE')\n self.tiff_compression.append('PREDICTOR=%d' % i_tiff_compression_predictor)\n self.tiff_compression.append('ZLEVEL=%d' % i_tiff_compression_zlevel)\n elif self.tiff_compress == \"LZW\":\n self.tiff_compression.append('COMPRESS=%s' % 'LZW')\n self.tiff_compression.append('PREDICTOR=%d' % i_tiff_compression_predictor)\n elif self.tiff_compress == \"NONE\":\n self.tiff_compression.append('COMPRESS=NONE')", "def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 
3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')", "def read(self, src):\n self.read_mesh(src)\n self.read_data(src)", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. 
Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def _read_dx(self, FN):\n if FN.endswith('.dx'):\n F = open(FN, 'r')\n else:\n import gzip\n F = gzip.open(FN, 'r')\n\n # Read the header\n line = F.readline()\n while line.find('object') == -1:\n line = F.readline()\n header = {}\n header['counts'] = [int(x) for x in line.split(' ')[-3:]]\n for name in ['origin', 'd0', 'd1', 'd2']:\n header[name] = [float(x) for x in F.readline().split(' ')[-3:]]\n F.readline()\n header['npts'] = int(F.readline().split(' ')[-3])\n\n # Test to make sure the grid type is okay.\n # These conditions are not absolultely essential,\n # but they reduce the number of subtraction operations.\n if not (header['d0'][1] == 0 and header['d0'][2] == 0\n and header['d1'][0] == 0 and header['d1'][2] == 0\n and header['d2'][0] == 0 and header['d2'][1] == 0):\n raise Exception('Trilinear grid must be in original basis')\n if not (header['d0'][0] > 0 and header['d1'][1] > 0\n and header['d2'][2] > 0):\n raise Exception('Trilinear grid must have positive coordinates')\n\n # Read the data\n vals = np.ndarray(shape=header['npts'], dtype=float)\n index = 0\n while index < header['npts']:\n line = F.readline()[:-1]\n items = [float(item) for item in line.split()]\n vals[index:index + len(items)] = items\n index = index + len(items)\n F.close()\n\n data = {\n 'origin':np.array(header['origin']), \\\n 'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \\\n 'counts':np.array(header['counts']), \\\n 'vals':vals}\n return data", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif (new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()", "def LoadZVIMetaData(filename):\r\n print \"Loading zvi file metadata...\"\r\n\r\n ole = OleFileIO_PL.OleFileIO(filename)\r\n #ole.dumpdirectory()\r\n metadata=ole.extract_metadata()\r\n (channeldict,Width,Height,MosaicSizeX,MosaicSizeY,ScaleFactorX,ScaleFactorY,\\\r\n channels,XPositions,YPositions,FocusPositions,XCoors,YCoors,ExposureTimes)=metadata\r\n Xpos=np.array(XPositions);\r\n Ypos=np.array(YPositions);\r\n\r\n extent=[Xpos.min()-(Width/2)*ScaleFactorX,Xpos.max()+(Width/2)*ScaleFactorX,\\\r\n Ypos.max()+(Height/2)*ScaleFactorY,Ypos.min()-(Height/2)*ScaleFactorY]\r\n \r\n return extent", "def _read_data(self, fh, byteorder='>'):\r\n fh.seek(len(self.header))\r\n data = fh.read()\r\n dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'\r\n depth = 1 if self.magicnum == b\"P7 332\" 
else self.depth\r\n shape = [-1, self.height, self.width, depth]\r\n size = numpy.prod(shape[1:])\r\n if self.magicnum in b\"P1P2P3\":\r\n data = numpy.array(data.split(None, size)[:size], dtype)\r\n data = data.reshape(shape)\r\n elif self.maxval == 1:\r\n shape[2] = int(math.ceil(self.width / 8))\r\n data = numpy.frombuffer(data, dtype).reshape(shape)\r\n data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]\r\n else:\r\n data = numpy.frombuffer(data, dtype)\r\n data = data[:size * (data.size // size)].reshape(shape)\r\n if data.shape[0] < 2:\r\n data = data.reshape(data.shape[1:])\r\n if data.shape[-1] < 2:\r\n data = data.reshape(data.shape[:-1])\r\n if self.magicnum == b\"P7 332\":\r\n rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)\r\n rgb332 *= [36, 36, 85]\r\n data = numpy.take(rgb332, data, axis=0)\r\n return data", "def parse(self, calibration_px=1.0):\n self.isParsingNeeded = False\n self.meta_data = {}\n self.data = []\n #CZI files\n if self.extend == '.czi':\n with czifile.CziFile(self.file_path) as czi:\n data = czi.asarray()\n Header_Metadata = str(czi).split('<ImageDocument>')\n string = '<ImageDocument>'+Header_Metadata[1]\n #print(string.strip(\"'\"))\n metadata = XMLET.fromstring(string.strip(\"'\"))\n try:\n #Query XML fore the metadata for picture shape(X;Y;Z-stacks).\n #Picture Shape.\n shapes = metadata.findall('./Metadata/Information/Image')[0]\n self.meta_data[\"ShapeSizeX\"] = int(shapes.findall('SizeX')[0].text)\n self.meta_data[\"ShapeSizeY\"] = int(shapes.findall('SizeY')[0].text)\n try:\n self.meta_data[\"ShapeSizeZ\"] = int(shapes.findall('SizeZ')[0].text)\n except:\n self.meta_data[\"ShapeSizeZ\"] = 1\n #Get the hyperstack dimension if the image is a hyperstack.\n try:\n self.meta_data[\"ShapeSizeC\"] = int(shapes.findall('SizeC')[0].text)\n except:\n self.meta_data[\"ShapeSizeC\"] = 1\n print(\"No info of color channels 1 assumed\")\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n PixelSizes = metadata.findall('./Metadata/Scaling/Items/Distance')\n self.meta_data['SizeX'] = float(PixelSizes[0].findall('Value')[0].text)*10**6\n self.meta_data['SizeY'] = float(PixelSizes[1].findall('Value')[0].text)*10**6\n self.meta_data['SizeZ'] = float(PixelSizes[2].findall('Value')[0].text)*10**6\n except(ValueError):\n print (\"Metadata fail\")\n\n #Tiff files.\n #Tiff files are problematic because they most likely wont contain the necessary metadata.\n #Try to get the shape info over common dimensions.\n elif self.extend == '.tif':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray()\n for shape in data.shape:\n if shape <5:\n self.meta_data[\"ShapeSizeC\"] = shape\n elif shape <40:\n self.meta_data[\"ShapeSizeZ\"] = shape\n else:\n self.meta_data[\"ShapeSizeY\"] = shape\n self.meta_data[\"ShapeSizeX\"] = shape\n\n #Read Lsm Files.\n elif self.extend == '.lsm':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray(memmap=True)\n headerMetadata = str(tif.pages[0].cz_lsm_scan_info)\n metadataList = headerMetadata.split(\"\\n*\")\n #Get image shape from lsm header SizeC=0 if not given.\n for shapes in metadataList:\n if \"images_height\" in shapes:\n self.meta_data[\"ShapeSizeX\"]= int(shapes.split()[-1])\n if \"images_width\" in shapes:\n self.meta_data[\"ShapeSizeY\"]= int(shapes.split()[-1])\n if \"images_number_planes\" in shapes:\n self.meta_data[\"ShapeSizeZ\"]= int(shapes.split()[-1])\n if \"images_number_channels\" in shapes:\n self.meta_data[\"ShapeSizeC\"]= int(shapes.split()[-1])\n #Get 
physical pixel size of image(nm/px) convert to(µm/px).\n data = np.swapaxes(data,1,2)\n lsm_header = str(tif.pages[0].tags.cz_lsm_info)\n LsmInfo = lsm_header.split(\", \")\n i = 0\n #Query for pixel size.\n for element in LsmInfo:\n if \"e-0\" in element:\n i += 1\n if i == 1:\n self.meta_data['SizeX'] = (float(element)*10**6)\n if i == 2:\n self.meta_data['SizeY'] = (float(element)*10**6)\n if i == 3:\n self.meta_data['SizeZ'] = (float(element)*10**6)\n\n elif self.extend == \".png\":\n data = misc.imread(self.file_path)\n data = np.expand_dims(np.expand_dims(data[...,0],0),0)\n self.meta_data[\"ShapeSizeC\"] = 1\n self.meta_data[\"ShapeSizeZ\"] = 1\n self.meta_data[\"ShapeSizeX\"] = data.shape[2]\n self.meta_data[\"ShapeSizeY\"] = data.shape[3]\n self.meta_data[\"SizeZ\"] = 1\n self.meta_data[\"SizeX\"] = 0.01\n self.meta_data[\"SizeY\"] = 0.01\n #Bring all formats in the same shape.\n self.data = np.reshape(data,(self.meta_data[\"ShapeSizeC\"],self.meta_data[\"ShapeSizeZ\"],self.meta_data[\"ShapeSizeX\"],self.meta_data[\"ShapeSizeY\"]))\n self.meta_data['ChannelNum'] = self.meta_data[\"ShapeSizeC\"]\n #Set pixel size to manuell value if there are no metadata.\n if self.meta_data == {}:\n self.set_calibration(calibration_px)\n #Set the box for manuel calibration to the actuell pixel size.", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def read_data(self):\n if not self.header['data included']:\n pass\n elif self.header['file type'] in (21, 26):\n self._isotope_data()\n if os.path.exists(self.filename + '_txt'):\n self._isotope_txt_data()\n elif self.header['file type'] == 22:\n # line scan types, no ImageHeader\n warnings.warn('No data read for line scan, fix')\n pass\n elif self.header['file type'] in (31, 35):\n self._beamstability_data()\n else:\n self._image_data()", "def test_mcg_data_compression(\n self, mcg_obj, awscli_pod, bucket_factory, bucketclass_dict\n ):\n download_dir = \"/aws/compression/\"\n awscli_pod.exec_cmd_on_pod(\n command=craft_s3_command(\n f\"cp s3://{constants.TEST_FILES_BUCKET}/enwik8 {download_dir}\"\n ),\n out_yaml_format=False,\n )\n bucketname = bucket_factory(1, bucketclass=bucketclass_dict)[0].name\n full_object_path = f\"s3://{bucketname}\"\n sync_object_directory(awscli_pod, download_dir, full_object_path, mcg_obj)\n # For this test, enwik8 is used in conjunction with Snappy compression\n # utilized by NooBaa. Snappy consistently compresses 35MB of the file.\n mcg_obj.check_data_reduction(bucketname, 35 * 1024 * 1024)", "def _loadBinaryData_compressed(self, filename, with_axis=None): \n self.set_data_writable() \n _data = numpy.load(filename)[\"data\"]\n self.data = self._extract_data_with_axis(_data, with_axis)\n self.set_data_protected()", "def readFastaFile(filename):", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def try3():\n path = '/Users/mayankkejriwal/datasets/eswc2016/'\n total = 10\n count = 1\n with gzip.open(path+'freebase-rdf-latest.gz', 'rb') as f:\n for line in f:\n print 'line : ',\n print line\n if count > total:\n break\n count += 1", "def parse_header(self):\n self._get_decompressor()\n whs = jpeg.ffi.new(\"int[]\", 3)\n whs_base = int(jpeg.ffi.cast(\"size_t\", whs))\n whs_itemsize = int(jpeg.ffi.sizeof(\"int\"))\n n = self.lib_.tjDecompressHeader2(\n self.decompressor.handle_,\n jpeg.ffi.cast(\"unsigned char*\",\n self.source.__array_interface__[\"data\"][0]),\n self.source.nbytes,\n jpeg.ffi.cast(\"int*\", whs_base),\n jpeg.ffi.cast(\"int*\", whs_base + whs_itemsize),\n jpeg.ffi.cast(\"int*\", whs_base + whs_itemsize + whs_itemsize))\n if n:\n raise JPEGRuntimeError(\"tjDecompressHeader2() failed with error \"\n \"%d and error string %s\" %\n (n, self.get_last_error()), n)\n self.width = int(whs[0])\n self.height = int(whs[1])\n self.subsampling = int(whs[2])", "def _read_files(self) -> MMD:\n\t\theaders = []\n\t\tbodies = []\n\t\tif self.config.file_type == FileType.CSV:\n\t\t\tif self.config.source_uris.endswith('.zip'):\n\t\t\t\twith ZipFile(self.config.source_uris) as zf:\n\t\t\t\t\tfor item in zf.namelist():\n\t\t\t\t\t\tif item.endswith('.csv'):\n\t\t\t\t\t\t\t# with zf.open(item, 'r') as infile:\n\t\t\t\t\t\t\tcsv_reader = 
csv.reader(TextIOWrapper(zf.open(item, 'r'), 'utf-8'))\n\t\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\t\t# need to find a more efficient way, the csv reader is a generator that can only be used once\n\t\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\t\telif self.config.source_uris.endswith('.csv'):\n\t\t\t\tfor uri in self.config.source_uris:\n\t\t\t\t\tif uri.endswith('.csv'):\n\t\t\t\t\t\tcsv_reader = csv.reader(open(uri, newline='', encoding='utf-8'))\n\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\telif self.config.file_type == FileType.CNSCHEMA:\n\t\t\theader = ['@id', 'label_@language', 'label_@value']\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tload_dict = json.load(load_f)\n\t\t\t\theader.extend(load_dict['@context'].keys())\n\t\t\t\theader = [h for h in header if h not in ['label', 'range', 'domain', 'subClassOf']]\n\t\t\t\ttmp_h = [h for h in header if h not in ['@id', '@language', '@value']]\n\t\t\t\tfor item in load_dict['@graph']:\n\t\t\t\t\tif item['@id'].split('/')[-2] == 'resource':\n\t\t\t\t\t\trow = [item['@id'], item['label']['@language'], item['label']['@value']]\n\t\t\t\t\t\tfor h in tmp_h:\n\t\t\t\t\t\t\tif h in item:\n\t\t\t\t\t\t\t\trow.append(item[h])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\t\tbody.append(tuple(row))\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(body)\n\t\telif self.config.file_type == FileType.OPENBASE:\n\t\t\theader = []\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tfor line in load_f:\n\t\t\t\t\trow = []\n\t\t\t\t\tflat_line = flatten_json(json.loads(line))\n\t\t\t\t\tfor key in flat_line:\n\t\t\t\t\t\tif key not in header:\n\t\t\t\t\t\t\theader.append(key)\n\t\t\t\t\tfor h in header:\n\t\t\t\t\t\tif h in flat_line:\n\t\t\t\t\t\t\trow.append(flat_line[h])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\tbody.append(row)\n\t\t\tfor item in body:\n\t\t\t\tif len(item) < len(header):\n\t\t\t\t\titem.extend([None for i in range(len(header) - len(item))])\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(tuple([tuple(item) for item in body]))\n\t\telif self.config.file_type == FileType.OPENKS:\n\t\t\t# knowledge graph dataset loading \n\t\t\tif os.path.exists(self.config.source_uris + '/entities') and os.path.exists(self.config.source_uris + '/triples'):\n\t\t\t\theaders = [['entities'], ['triples']]\n\t\t\t\tfor file in ['entities', 'triples']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('\\t')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\t# general text dataset loading\n\t\t\telif os.path.exists(self.config.source_uris + '/train') and os.path.exists(self.config.source_uris + '/valid'):\n\t\t\t\theaders = [['train'], ['valid']]\n\t\t\t\tfor file in ['train', 'valid']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('@@')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\telse:\n\t\t\t\tlogger.warn('Only allows loading with entities and triples for now!')\n\t\t\t\traise IOError\n\t\telif self.config.file_type == FileType.NERO:\n\t\t\theaders = [['unlabeled_data'], ['predict'], ['pattern']]\n\t\t\tfor file in ['unlabeled_data', 'predict', 
'pattern']:\n\t\t\t\ttmp = []\n\t\t\t\twith open(self.config.source_uris + '/' + file + '.json', 'r') as load_f:\n\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\ttmp.append(line.strip())\n\t\t\t\t\tbodies.append(tuple(tmp))\n\n\t\tmmd.name = self.config.data_name\n\t\tmmd.headers = headers\n\t\tmmd.bodies = bodies\n\t\treturn mmd", "def _load_mock_bgs_mxxl_file_fits(filename):\n desitarget.io.check_fitsio_version()\n data = fitsio.read(filename,\n columns= ['objid','brickid',\n 'RA','DEC','Z', 'R'])\n\n objid = data['objid'].astype('i8')\n brickid = data['brickid'].astype('i8')\n ra = data['RA'].astype('f8') % 360.0 #enforce 0 < ra < 360\n dec = data['DEC'].astype('f8')\n SDSSr_true = data['R'].astype('f8')\n zred = data['Z'].astype('f8')\n\n return {'objid':objid,'brickid':brickid,\n 'RA':ra, 'DEC':dec, 'Z': zred , \n 'SDSSr_true':SDSSr_true}", "def yomaha2nc4(finame, foname, line_buffer=100000, zlib=False):\n\n MISS_OUT = -999\n\n tic = tm.time()\n\n print \"yomaha2nc4\"\n print \"working with\"\n print finame\n print foname\n\n #=====================================================================\n # Set up the metadata\n #=====================================================================\n\n missing = ['-999.9999' if i in [0, 8, 15, 18, 21]\n else '-99.9999' if i in [1, 9, 16, 19, 22]\n else '-999.999' if i in [3, 10, 17, 20, 23]\n else '-999.9' if i == 2\n else '-999.99' if i in [4, 5, 6, 7, 11, 12, 13, 14]\n else '-128' if i == 27\n else '-999'\n for i in range(28)]\n\n variables = [\n {'name': 'x_deep',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude'},\n {'name': 'y_deep',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude'},\n {'name': 'z_park',\n 'unit': 'dbar',\n 'long_name': 'Parking Pressure'},\n {'name': 't_deep',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time'},\n {'name': 'u_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of zonal deep velocity'},\n {'name': 'v_depth',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of meridional deep velocity'},\n {'name': 'e_u_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of zonal deep velocity'},\n {'name': 'e_v_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of meridional deep velocity'},\n {'name': 'x_surf',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude'},\n {'name': 'y_surf',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude'},\n {'name': 't_surf',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time'},\n {'name': 'u_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of zonal velocity at sea surface'},\n {'name': 'v_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of meridional velocity at sea surface'},\n {'name': 'e_u_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of zonal velocity at sea surface'},\n {'name': 'e_v_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of meridional velocity at sea surface'},\n {'name': 'x_last_prev',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the last fix at the surface during previous cycle'},\n {'name': 'y_last_prev',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the last fix at the surface during previous cycle'},\n {'name': 't_last_prev',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the last fix at the surface during previous cycle'},\n {'name': 'x_first',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the first fix at the surface'},\n {'name': 'y_first',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the first fix at the surface'},\n {'name': 
't_first',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the first fix at the surface'},\n {'name': 'x_last',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the last fix at the surface'},\n {'name': 'y_last',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the last fix at the surface'},\n {'name': 't_last',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the last fix at the surface'},\n {'name': 'n_fix',\n 'unit': '',\n 'long_name': 'Number of surface fixes'},\n {'name': 'float_id',\n 'unit': '',\n 'long_name': 'Float ID'},\n {'name': 'n_cycle',\n 'unit': '',\n 'long_name': 'Cycle number'},\n {'name': 'inv_flag',\n 'unit': '',\n 'long_name': 'Time inversion/duplication flag'},\n ]\n\n dtype = [np.int32 if i in [24, 25, 26]\n else np.byte if i == 27\n else np.float32\n for i in range(28)]\n\n #=====================================================================\n # Set up the output file\n #=====================================================================\n var = []\n\n # get file length\n length = 0\n with open(finame, 'r') as fi:\n for line in fi:\n length += 1\n\n # create the out-file\n fo = nc.Dataset(foname, mode='w', format='NETCDF4', clobber=True)\n\n # create dims and vlan data type\n fo.createDimension('id', size=length)\n id_v = fo.createVariable('id', np.int64, 'id',\n zlib=zlib, fill_value=MISS_OUT)\n id_v[:] = range(1, length + 1)\n\n for i in range(len(variables)):\n v_dict = variables[i]\n v_obj = fo.createVariable(v_dict['name'], dtype[i], 'id', zlib=zlib,\n fill_value=missing[i])\n v_obj.units = v_dict['unit']\n v_obj.long_name = v_dict['long_name']\n var.append(v_obj)\n\n #=====================================================================\n # read and write the data\n #=====================================================================\n buf = [[] for i in range(len(variables))]\n idx = 0\n with open(finame, 'r') as fi:\n old_idx = idx\n for line in fi:\n idx += 1\n line = line.strip()\n [buf[i].append(dtype[i](val)) if val != missing[i]\n else buf[i].append(dtype[i](MISS_OUT))\n for i, val in enumerate(line.split())]\n # write chunk to disk and clear buffer\n if np.mod(idx, line_buffer) == 0:\n# id_v[old_idx:idx-1] = range(old_idx + 1,\n# len(buf[i][:]) + old_idx + 1)\n for i in range(len(variables)):\n var[i][old_idx:idx] = np.ma.array(\n buf[i],\n mask=[val == dtype[i](MISS_OUT)\n for val in buf[i]])\n\n old_idx = idx\n buf = [[] for i in range(len(variables))]\n # write last peace to file\n if old_idx != idx:\n# id_v[old_idx:idx - 1] = range(old_idx + 1, len(buf[i][:]) + old_idx + 1)\n for i in range(len(variables)):\n var[i][old_idx:idx] = np.ma.array(buf[i],\n mask=[val == dtype[i](MISS_OUT)\n for val in buf[i]])\n\n #=====================================================================\n # clean up and finish\n #=====================================================================\n fo.close()\n print \"yomaha2nc4 done after % 12.6f seconds\" % (tm.time() - tic)\n\n return None", "def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = 
round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)", "def load_materials(file_data, headers, base_path):\n\n\n def load_material_texture(texture_file):\n filename = os.path.join(base_path, texture_file + \".jpg\")\n try:\n img = bpy.data.images.load(str(filename))\n cTex = bpy.data.textures.new('ColorTex', type = 'IMAGE')\n cTex.image = img\n return cTex\n except:\n print (\"Cannot load image {}\".format(filename))\n return None\n\n\n def material_from_pack(material):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n texture_file_name = material[0].decode(\"utf-8\").replace('\\x00', '').strip()\n return (\n texture_file_name,\n load_material_texture(texture_file_name)\n )\n texture_offset, texture_length = headers[1]\n texture_chunk = Struct(\"64sii\") \n texture_size = texture_chunk.size\n texture_count = int(texture_length / texture_size)\n\n textures = []\n for current_texture_idx in range(texture_count):\n texture_file_position = texture_offset + current_texture_idx * texture_size\n packed_texture = texture_chunk.unpack(file_data[texture_file_position : texture_file_position+texture_size])\n current_texture = material_from_pack(packed_texture)\n textures.append(current_texture)\n \n return textures", "def _load_mock_bgs_mxxl_file_hdf5(filename):\n f = h5py.File(filename)\n ra = f[\"Data/ra\"][...].astype('f8') % 360.0\n dec = f[\"Data/dec\"][...].astype('f8')\n SDSSr_true = f[\"Data/app_mag\"][...].astype('f8')\n zred = f[\"Data/z_obs\"][...].astype('f8')\n f.close()\n\n return {'RA':ra, 'DEC':dec, 'Z': zred ,\n 'SDSSr_true':SDSSr_true}", "def process_raw_data_amld(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import os\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n xMinCarSpeed = -10\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## 
xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n #xdat = str('20') + xFilename[10:16]\n xdat = xDate\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n removeOut = xOut + xCar + \"_\" + xdat + \"_removed.csv\"\n fnLog = xOut + xCar + \"_\" + xdat + \".log\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n #dtime = open(xDir + xFilename).readlines().pop(2).split(',')[0]\n #firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n # int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # firsttime = firstdate.strftime('%s.%f')\n #firsttime = dt_to_epoch(firstdate)\n firsttime = float(open(xDir + xFilename).readlines().pop(2).split(',')[0])\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n ## read in file\n tempFile = pd.read_csv(xDir+xFilename)\n tempFile['DATE'] = tempFile.apply(lambda x: datetime.datetime.fromtimestamp(x.nearest10hz).strftime('%Y-%m-%d'),axis=1)\n tempFile['TIME'] = tempFile.apply(lambda x: datetime.datetime.fromtimestamp(x.nearest10hz).strftime('%H:%M:%S'),axis=1)\n tempFile['SECONDS'] = tempFile.apply(lambda x: int(float(str(x.nearest10hz)[10:])*1e9),axis=1)\n tempFile = tempFile.rename(columns = {'Velocity':'VELOCITY',\n 'Latitude':'LAT',\n 'Longitude':'LONG'})\n tempFile1 = tempFile.copy().sort_values('nearest10hz').reset_index(drop=True)\n\n if bFirst:\n #tempFile.sort_values('nearest10hz').reset_index(drop=True).to_csv(fnOutTemp)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n\n if not bFirst:\n #fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n #fLog.write(\"Processing file: \" + str(xFilename) + \"\\n\")\n\n wind_df4 = tempFile1.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > -1, :]\n wrongSpeed = wind_df4.loc[wind_df4.VELOCITY <= xMinCarSpeed,:]\n wrongSpeed=wrongSpeed.assign(Reason='velocity too slow')\n\n #wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < 1000, :]\n\n wrongSpeed2 = wind_df5.loc[wind_df5.VELOCITY >= xMaxCarSpeed, 
:]\n wrongSpeed2 = wrongSpeed2.assign(Reason='velocity too fast')\n\n wrongSpeeds = pd.concat([wrongSpeed,wrongSpeed2])\n #notGood = pd.concat([wrongSpeeds,nullCH4])\n notGood = pd.concat([wrongSpeeds])\n\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n\n nullCH4 = wind_df4.loc[~wind_df4.CH4.notnull(), :]\n if nullCH4.shape[0] > 0:\n nullCH4 = nullCH4.assign(Reason='CH4 NA')\n removedDF = pd.concat([notGood,nullCH4])\n elif nullCH4.shape[0]==0:\n removedDF = notGood\n wind_df4 = wind_df5.copy()\n\n def rolling_cor(df, first, sec, window, newname):\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n xvals = df.loc[(i - sidewind):(i + sidewind + 1), first]\n yvals = df.loc[(i - sidewind):(i + sidewind + 1), sec]\n cor_i.append(xvals.corr(yvals))\n df.loc[:, newname] = cor_i\n return (df)\n\n def rolling_c2h6(df, colname, window, percentile, newname):\n import numpy as np\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n c2h6vals = df.loc[(i - sidewind):(i + sidewind + 1), colname]\n cor_i.append(np.percentile(c2h6vals, percentile))\n df.loc[:, newname] = cor_i\n return (df)\n\n wind_df5 = rolling_cor(wind_df4,'CH4','C2H6',80,'rollingR_8')\n wind_df6 = rolling_cor(wind_df5,'CH4','C2H6',150,'rollingR_15')\n wind_df7 = rolling_cor(wind_df6,'CH4','C2H6',300,'rollingR_30')\n wind_df8 = rolling_cor(wind_df7,'CH4','C2H6',450,'rollingR_45')\n wind_df9 = rolling_cor(wind_df8,'CH4','C2H6',600,'rollingR_60')\n wind_df10 = rolling_c2h6(wind_df9,'C2H6',300,50,'rollingc2h6_30')\n wind_df11 = rolling_c2h6(wind_df10,'C2H6',150,50,'rollingc2h6_15')\n wind_df12 = rolling_c2h6(wind_df11,'C2H6',450,50,'rollingc2h6_45')\n\n wind_df13 = rolling_c2h6(wind_df12,'CH4',450,50,'rollingch4_45')\n wind_df14 = rolling_c2h6(wind_df13,'CH4',300,50,'rollingch4_30')\n wind_df15 = rolling_c2h6(wind_df14,'CH4',150,50,'rollingch4_15')\n wind_df16 = rolling_c2h6(wind_df15,'CH4',600,50,'rollingch4_60')\n\n\n del(wind_df4)\n wind_df4 = wind_df16.copy()\n ## if you want to filter out high temperatures\n #wind_df4 = wind_df5.loc[wind_df5.TEMPC < 95, :].reset_index(drop=True)\n\n #fLog.write(\"Usable lines - \" + str(wind_df4.shape[0]) + \".\" + \"\\n\")\n #fLog.close()\n\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n removedDF.to_csv(removeOut,index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n removed = pd.read_csv(removeOut)\n pd.concat([removed, removedDF]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(removeOut, index=False)\n\n #os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def process_raw_data_aeris2(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n from datetime import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED 
TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(1).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n woo = row\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #seconds = fdate.strftime('%s.%f')\n seconds = dt_to_epoch(fdate)\n def getNS(seconds):\n ns = str(float(seconds) * 1e-3)[11:]\n # str(pd.to_numeric(str(float(seconds) * 1e-3)[11:]) * 100000)[:9]\n return (str(ns).ljust(15, '0'))[:9]\n\n import sys\n if sys.platform.startswith('win'):\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(\n dateob.strftime('%H:%M:%S')) + ',' + str(\n 
int(pd.to_numeric(dateob.strftime('%S.%f')))) + ',' + str(\n pd.to_numeric(dateob.strftime('%f')) * 1000) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(\n lstS[3]) + ',' + str(lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(\n ',') + str(lstS[14])\n if not sys.platform.startswith('win'):\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(\n dateob.strftime('%H:%M:%S')) + ',' + str(str(float(seconds)*1e-3)[:10]) + ',' + getNS(seconds)+ str(',')\n csvWrite += str(lstS[20]) + ',' + str(lstS[15]) + ',' + str(lstS[16]) + ',' + str(\n lstS[17]) + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(\n lstS[3]) + ',' + str(lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(\n ',') + str(lstS[14]) + '\\n'\n fOut.write(csvWrite)\n xCntObs += 1\n infOut.write(str(xFilename) + '\\n')\n fOut.close()\n fLog.close()\n infOut.close()\n # print(xCar + \"\\t\" + xdat + \"\\t\" + fnOut[-22:] + \"\\t\" + str(xCntObs) + \"\\t\" + str(xCntGoodValues) + \"\\t\" + str(\n # gZIP))\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df_not_null = wind_df.loc[wind_df['LAT'].notnull(),].reset_index(drop=True)\n del (wind_df)\n wind_df = wind_df_not_null.copy()\n\n radians = False\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n # wind_df['VELOCITY_calc'] = wind_df.apply(lambda row:calc_velocity(row['timediff'],row['distance']),axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: (str(x.VELOCITY)),axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: 0 if x.VELOCITY == 'XX.X' else x.VELOCITY,axis = 1)\n wind_df['fVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n = 1, expand = True)[0]\n wind_df = wind_df.loc[wind_df['fVel'].notnull(),].reset_index(drop=True)\n wind_df['firstVel'] = wind_df.apply(lambda x: int(x['fVel']),axis = 1)\n\n wind_df['sVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n = 1, expand = True)[1]\n wind_df = wind_df.loc[wind_df['sVel'].notnull(),].reset_index(drop=True)\n 
wind_df['secVel'] = wind_df.apply(lambda x: int(x['sVel']),axis = 1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstVel) + '.' + str(x.secVel)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['VELOCITY','secVel','sVel','fVel','firstVel'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'vloc':'VELOCITY'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n ## CORRECT W WIND THING\n wind_df['W'] = wind_df.apply(lambda x: (str(x.W)),axis=1)\n wind_df['W'] = wind_df.apply(lambda x: 0 if x.W == 'XX.X' else x.W,axis = 1)\n wind_df['fW'] = wind_df[\"W\"].str.split(\".\", n = 1, expand = True)[0]\n #wind_df = wind_df.loc[wind_df['fW'].notnull(),].reset_index(drop=True)\n wind_df['firstW'] = wind_df.apply(lambda x: int(x['fW']),axis = 1)\n wind_df['sW'] = wind_df[\"W\"].str.split(\".\", n = 1, expand = True)[1]\n #wind_df = wind_df.loc[wind_df['sW'].notnull(),].reset_index(drop=True)\n wind_df['secW'] = wind_df.apply(lambda x: int(x['sW']),axis = 1)\n wind_df['wloc'] = wind_df.apply(lambda x: float(str(x.firstW) + '.' + str(x.secW)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['W','secW','sW','fW','firstW'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'wloc':'W'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n\n ## CORRECT U WIND THING\n wind_df['U'] = wind_df.apply(lambda x: (str(x.U)),axis=1)\n wind_df['U'] = wind_df.apply(lambda x: 0 if x.U == 'XX.X' else x.U,axis = 1)\n wind_df['fU'] = wind_df[\"U\"].str.split(\".\", n = 1, expand = True)[0]\n wind_df['firstU'] = wind_df.apply(lambda x: int(x['fU']),axis = 1)\n wind_df['sU'] = wind_df[\"U\"].str.split(\".\", n = 1, expand = True)[1]\n wind_df['secU'] = wind_df.apply(lambda x: int(x['sU']),axis = 1)\n wind_df['uloc'] = wind_df.apply(lambda x: float(str(x.firstU) + '.' + str(x.secU)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['U','secU','sU','fU','firstU'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'uloc':'U'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n\n ## CORRECT V WIND THING\n wind_df['V'] = wind_df.apply(lambda x: (str(x.V)),axis=1)\n wind_df['V'] = wind_df.apply(lambda x: 0 if x.V == 'XX.X' else x.V,axis = 1)\n wind_df['fV'] = wind_df[\"V\"].str.split(\".\", n = 1, expand = True)[0]\n wind_df['firstV'] = wind_df.apply(lambda x: int(x['fV']),axis = 1)\n wind_df['sV'] = wind_df[\"V\"].str.split(\".\", n = 1, expand = True)[1]\n wind_df['secV'] = wind_df.apply(lambda x: int(x['sV']),axis = 1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstV) + '.' 
+ str(x.secV)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['V','secV','sV','fV','firstV'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'vloc':'V'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4'], axis=1)\n\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3 = wind_df3.drop(['shift_CH4'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3 = wind_df3.loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind', 'phi', 'raw_CH4',\n 'distance']]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df4 = wind_df3.copy()\n\n # wind_df7 = add_odometer(wind_df4,'LAT','LONG')\n\n # wind_df4 = wind_df7.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n wind_df4 = wind_df5.copy()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def read(self, fname):\r\n self.header = {}\r\n self.resetvals()\r\n infile = self._open(fname, \"rb\")\r\n self._readheader(infile)\r\n # Compute image size\r\n try:\r\n self.dim1 = int(self.header['NumberOfRowsInFrame'])\r\n self.dim2 = int(self.header['NumberOfColsInFrame'])\r\n self.bpp = int(self.header['BitsPerPixel'])\r\n except:\r\n raise Exception(\"GE file\", str(fname) + \\\r\n \"is corrupt, cannot read it\")\r\n\r\n # More than one image can be saved in a GE file\r\n # Will only load the first one\r\n\r\n\r\n # Go to the beginning of the file\r\n infile.seek(0)\r\n infile.seek(self.header['HeaderSizeInBytes'] + self.header['UserHeaderSizeInBytes'])\r\n\r\n ReadBytes = self.dim1 * self.dim2 * (self.bpp / 8)\r\n block = infile.read(ReadBytes)\r\n block = N.fromstring(block, N.uint16)\r\n\r\n infile.close()\r\n\r\n try:\r\n self.data = N.reshape(block, [self.dim2, self.dim1])\r\n except:\r\n 
print len(block), self.dim2, self.dim1\r\n raise IOError, \\\r\n 'Size spec in GE-header does not match size of image data field'\r\n\r\n self.bytecode = self.data.dtype.type\r\n self.pilimage = None\r\n return self", "def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = 
cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. 
Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def Load(self):\n\t\tfile = open(self.fileName, 'r')\n\t\tself.hdr = file.readline().split('\\n')[0].split(',')\n\t\t\n\t\tfor line in file.readlines():\n\t\t\ttokens = line.split('\\n')[0].split(',')\n\t\t\tif int(tokens[1]) == 0:\n\t\t\t\tself.h0.append(tokens[0])\n\t\t\telse:\n\t\t\t\tself.h1.append(tokens[0])\n\t\tfile.close()\n\t\tself.numH1 = len(self.h1)\n\t\tself.numH0 = len(self.h0)", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def process_raw_data_aeris_maybe(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift,\n maxSpeed='45',\n minSpeed='2'):\n import os\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n xMinCarSpeed = -10\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI 
= 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n import pandas as pd\n\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n # xdat = str('20') + xFilename[10:16]\n xdat = xDate\n fnOut = xOut + xCar + \"_\" + '20'+ xdat + \"_dat.csv\" # set CSV output for raw data\n removeOut = xOut + xCar + \"_\" + '20'+ xdat + \"_removed.csv\"\n fnLog = xOut + xCar + \"_\" + '20'+ xdat + \".log\" # output for logfile\n infOut = xOut + xCar + \"_\" +'20'+ xdat + \"_info.csv\"\n #\n\n # dtime = open(xDir + xFilename).readlines().pop(2).split(',')[0]\n # firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n # int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # firsttime = firstdate.strftime('%s.%f')\n # firsttime = dt_to_epoch(firstdate)\n\n #firsttime = float(open(xDir + xFilename).readlines().pop(2).split(',')[0])\n\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n ## read in file\n\n tempFile = pd.read_csv(xDir + \"/\" + xFilename, delimiter=\",\")\n #dtime = tempFile.loc[2,'Time Stamp'].split(',')[0]\n\n #firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n # int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n\n tempFile['dtime'] = tempFile.apply(lambda x: x['Time Stamp'].split(',')[0],axis=1)\n tempFile['datime'] = tempFile.apply(lambda x: datetime.datetime(int(x.dtime[6:10]), int(x.dtime[0:2]), int(x.dtime[3:5]), int(x.dtime[11:13]),\n int(x.dtime[14:16]), int(x.dtime[17:19]), int(float(x.dtime[19:23]) * 1000000)),axis=1)\n tempFile['DATE'] = tempFile.apply(lambda x: x.datime.strftime('%Y-%m-%d'),axis=1)\n tempFile['TIME'] = tempFile.apply(lambda x: x.datime.strftime('%H:%M:%S'),axis=1)\n tempFile['nearest10hz'] = tempFile.apply(lambda x: round(float(x.datime.timestamp()),1),axis=1)\n tempFile['SECONDS'] = tempFile.apply(lambda x: int(float(str(x.nearest10hz)[11:]) * 1e9), axis=1)\n tempFile1 = tempFile.copy().sort_values(by='nearest10hz',ascending=True).reset_index(drop=True)\n tempFile1['nearest10hz'] = tempFile1.loc[:,'nearest10hz'].astype(float)\n tempFile1['nearest10hz'] = tempFile1.loc[:,'nearest10hz'].astype(str)\n\n del(tempFile)\n tempFile = tempFile1.copy()\n\n 
tempFile = tempFile.rename(columns={\n 'T (degC)':'T',\n 'Inlet Number':'inletNumber',\n 'P (mbars)':'P',\n 'CH4 (ppm)':'CH4',\n 'H2O (ppm)':'H2O',\n 'C2H6 (ppb)':'C2H6',\n 'C2/C1':'C1C2',\n 'Battery Charge (V)':'batteryCharge',\n 'Power Input (mV)':'powerInput',\n 'Current (mA)':'current',\n 'SOC (%)':'SOC','Time Stamp':'TimeStamp',\n 'Compass (deg)':'CompassDeg',\n 'Speed (m/sec)':'ws',\n 'Dir (deg)': 'winddir',\n 'U (m/sec)':'U',\n 'V (m/sec)': 'V',\n 'Latitude':'LAT',\n 'Longitude':'LONG',\n 'W (m/sec)':'W'})\n #tempFile1 = tempFile.copy().sort_values('nearest10hz').reset_index(drop=True)\n radians = False\n\n wind_df_temp = tempFile.copy()\n wind_df_temp['ttot'] = wind_df_temp.apply(lambda x: float(x.nearest10hz),axis=1)\n wind_df = wind_df_temp.copy().sort_values(by='ttot',ascending=True).reset_index(drop=True)\n\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.ttot.shift(periods=1)\n wind_df['next_TIME'] = wind_df.ttot.shift(periods=-1)\n\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda row:calc_velocity(row['timediff'],row['distance']),axis=1)\n\n\n try:\n wind_df['W'] = wind_df['W'].astype(float)\n except:\n wind_df_other = wind_df.copy()\n wind_df_other['W'] = wind_df_other.apply(lambda x: 0 if x.W == 'XX.X' else x.W, axis=1)\n wind_df_other['fW'] = wind_df_other[\"W\"].str.split(\".\", n=1, expand=True)[0]\n wind_df_other1 = wind_df_other.loc[wind_df_other['fW'].notnull(),].reset_index(drop=True)\n wind_df_other1['firstW'] = wind_df_other1.apply(lambda x: int(x['fW']), axis=1)\n wind_df_other1['sW'] = wind_df_other1[\"W\"].str.split(\".\", n=1, expand=True)[1]\n wind_df_other2 = wind_df_other1.loc[wind_df_other1['sW'].notnull(),].reset_index(drop=True)\n wind_df_other2['secW'] = wind_df_other2.apply(lambda x: int(x['sW']), axis=1)\n wind_df_other2['wloc'] = wind_df_other2.apply(lambda x: float(str(x.firstW) + '.' + str(x.secW)), axis=1)\n wind_df_other3 = wind_df_other2.drop(columns=['W', 'secW', 'sW', 'fW', 'firstW'])\n del (wind_df)\n wind_df4 = wind_df_other3.rename(columns={'wloc': 'W'})\n wind_df = wind_df4.copy()\n del (wind_df4)\n\n try:\n wind_df['U'] = wind_df['U'].astype(float)\n except:\n wind_df_other = wind_df.copy()\n wind_df_other['U'] = wind_df_other.apply(lambda x: 0 if x.U == 'XX.X' else x.U, axis=1)\n wind_df_other['fU'] = wind_df_other[\"U\"].str.split(\".\", n=1, expand=True)[0]\n wind_df_other1 = wind_df_other.loc[wind_df_other['fU'].notnull(),].reset_index(drop=True)\n wind_df_other1['firstU'] = wind_df_other1.apply(lambda x: int(x['fU']), axis=1)\n wind_df_other1['sU'] = wind_df_other1[\"U\"].str.split(\".\", n=1, expand=True)[1]\n wind_df_other2 = wind_df_other1.loc[wind_df_other1['sU'].notnull(),].reset_index(drop=True)\n wind_df_other2['secU'] = wind_df_other2.apply(lambda x: int(x['sU']), axis=1)\n wind_df_other2['uloc'] = wind_df_other2.apply(lambda x: float(str(x.firstU) + '.' 
+ str(x.secU)), axis=1)\n wind_df_other3 = wind_df_other2.drop(columns=['U', 'secU', 'sU', 'fU', 'firstU'])\n del (wind_df)\n wind_df4 = wind_df_other3.rename(columns={'uloc': 'U'})\n wind_df = wind_df4.copy()\n del (wind_df4)\n\n try:\n wind_df['V'] = wind_df['V'].astype(float)\n except:\n wind_df_other = wind_df.copy()\n wind_df_other['V'] = wind_df_other.apply(lambda x: 0 if x.V == 'XX.X' else x.V, axis=1)\n wind_df_other['fV'] = wind_df_other[\"V\"].str.split(\".\", n=1, expand=True)[0]\n wind_df_other1 = wind_df_other.loc[wind_df_other['fV'].notnull(),].reset_index(drop=True)\n wind_df_other1['firstV'] = wind_df_other1.apply(lambda x: int(x['fV']), axis=1)\n wind_df_other1['sV'] = wind_df_other1[\"V\"].str.split(\".\", n=1, expand=True)[1]\n wind_df_other2 = wind_df_other1.loc[wind_df_other1['sV'].notnull(),].reset_index(drop=True)\n wind_df_other2['secV'] = wind_df_other2.apply(lambda x: int(x['sV']), axis=1)\n wind_df_other2['vloc'] = wind_df_other2.apply(lambda x: float(str(x.firstV) + '.' + str(x.secV)), axis=1)\n wind_df_other3 = wind_df_other2.drop(columns=['V', 'secV', 'sV', 'fV', 'firstV'])\n del (wind_df)\n wind_df4 = wind_df_other3.rename(columns={'vloc': 'V'})\n wind_df = wind_df4.copy()\n del (wind_df4)\n\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n\n wind_df['adj_v'] = wind_df.apply(lambda row: -row['horz_length'] * np.cos(row['adj_theta']), axis=1)\n wind_df['adj_u'] = wind_df.apply(lambda row: row['horz_length'] * np.sin(row['adj_theta']), axis=1)\n\n ## GO THROUGH WIND\n window_size = 30\n u_series = pd.Series(wind_df['adj_u'])\n u_windows = u_series.rolling(window_size)\n u_averages = pd.DataFrame(u_windows.mean())\n u_averages.columns = ['U_avg']\n u_averages['key'] = u_averages.index\n\n v_series = pd.Series(wind_df['adj_v'])\n v_windows = v_series.rolling(window_size)\n v_averages = pd.DataFrame(v_windows.mean())\n v_averages.columns = ['V_avg']\n v_averages['key'] = v_averages.index\n\n w_series = pd.Series(wind_df['W'])\n w_windows = w_series.rolling(window_size)\n w_averages = pd.DataFrame(w_windows.mean())\n w_averages.columns = ['W_avg']\n w_averages['key'] = w_averages.index\n\n vw_df = w_averages.set_index('key').join(v_averages.set_index('key'))\n vw_df['key'] = vw_df.index\n uvw_df = vw_df.set_index('key').join(u_averages.set_index('key'))\n uvw_df['key'] = uvw_df.index\n wind_df2 = wind_df.copy()\n wind_df2['key'] = wind_df2.index\n wind_df = uvw_df.set_index('key').join(wind_df2.set_index('key'))\n\n wind_df['r_avg'] = wind_df.apply(lambda row: np.sqrt(row['U_avg'] ** 2 + row['V_avg'] ** 2), axis=1)\n wind_df['theta_avg'] = wind_df.apply(lambda row: np.arctan(-row['U_avg'] / row['V_avg']), axis=1)\n\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['CH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = 
wind_df.loc[:, ['shift_CH4']]\n\n wind_df['shift_R'] = wind_df.R.shift(periods=int(float(shift)))\n wind_df['raw_R'] = wind_df.apply(lambda row: row['R'], axis=1)\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4', 'R'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3['R'] = wind_df3.loc[:, 'shift_R']\n wind_df3 = wind_df3.drop(['shift_CH4', 'shift_R'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n\n if bFirst:\n # tempFile.sort_values('nearest10hz').reset_index(drop=True).to_csv(fnOutTemp)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n\n if not bFirst:\n # fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n # fLog.write(\"Processing file: \" + str(xFilename) + \"\\n\")\n\n wind_df4 = wind_df3.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > -1, :]\n wrongSpeed = wind_df4.loc[wind_df4.VELOCITY <= xMinCarSpeed, :]\n wrongSpeed = wrongSpeed.assign(Reason='velocity too slow')\n\n # wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < 1000, :]\n\n wrongSpeed2 = wind_df5.loc[wind_df5.VELOCITY >= xMaxCarSpeed, :]\n wrongSpeed2 = wrongSpeed2.assign(Reason='velocity too fast')\n\n wrongSpeeds = pd.concat([wrongSpeed, wrongSpeed2])\n # notGood = pd.concat([wrongSpeeds,nullCH4])\n notGood = pd.concat([wrongSpeeds])\n\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n\n nullCH4 = wind_df4.loc[~wind_df4.CH4.notnull(), :]\n if nullCH4.shape[0] > 0:\n nullCH4 = nullCH4.assign(Reason='CH4 NA')\n removedDF = pd.concat([notGood, nullCH4])\n elif nullCH4.shape[0] == 0:\n removedDF = notGood\n wind_df4 = wind_df5.copy()\n\n def rolling_cor(df, first, sec, window, newname):\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n try:\n cor_i.append(xvals.corr(yvals))\n xvals = df.loc[(i - sidewind):(i + sidewind + 1), first]\n yvals = df.loc[(i - sidewind):(i + sidewind + 1), sec]\n except:\n cor_i.append(-2)\n\n df.loc[:, newname] = cor_i\n return (df)\n def rolling_c2h6(df, colname, window, percentile, newname):\n import numpy as np\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n try:\n c2h6vals = df.loc[(i - sidewind):(i + sidewind + 1), colname]\n cor_i.append(np.percentile(c2h6vals, percentile))\n except:\n cor_i.append(-999)\n df.loc[:, newname] = cor_i\n return (df)\n\n wind_df5 = rolling_cor(wind_df4, 'CH4', 'C2H6', 8, 'rollingR_8')\n wind_df6 = rolling_cor(wind_df5, 'CH4', 'C2H6', 15, 'rollingR_15')\n wind_df7 = rolling_cor(wind_df6, 'CH4', 'C2H6', 30, 'rollingR_30')\n wind_df8 = rolling_cor(wind_df7, 'CH4', 'C2H6', 45, 'rollingR_45')\n wind_df9 = rolling_cor(wind_df8, 'CH4', 'C2H6', 60, 'rollingR_60')\n\n wind_df10 = rolling_c2h6(wind_df9, 'C2H6', 30, 50, 'rollingc2h6_30')\n wind_df11 = 
rolling_c2h6(wind_df10, 'C2H6', 15, 50, 'rollingc2h6_15')\n wind_df12 = rolling_c2h6(wind_df11, 'C2H6', 45, 50, 'rollingc2h6_45')\n\n wind_df13 = rolling_c2h6(wind_df12, 'CH4', 45, 50, 'rollingch4_45')\n wind_df14 = rolling_c2h6(wind_df13, 'CH4', 30, 50, 'rollingch4_30')\n wind_df15 = rolling_c2h6(wind_df14, 'CH4', 15, 50, 'rollingch4_15')\n wind_df16 = rolling_c2h6(wind_df15, 'CH4', 60, 50, 'rollingch4_60')\n\n del (wind_df4)\n wind_df4 = wind_df16.copy()\n ## if you want to filter out high temperatures\n # wind_df4 = wind_df5.loc[wind_df5.TEMPC < 95, :].reset_index(drop=True)\n\n # fLog.write(\"Usable lines - \" + str(wind_df4.shape[0]) + \".\" + \"\\n\")\n # fLog.close()\n\n if bFirst:\n wind_df4.sort_values(by='ttot',ascending=True).reset_index(drop=True).to_csv(fnOut, index=False)\n removedDF.to_csv(removeOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='ttot',ascending=True).reset_index(drop=True).to_csv(fnOut, index=False)\n removed = pd.read_csv(removeOut)\n pd.concat([removed, removedDF]).sort_values(by='ttot',ascending=True).reset_index(drop=True).to_csv(removeOut,\n index=False)\n\n # os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def __init__(self,datafile,crop=False,usepickled=True,upsample=None,medianfilt=False,remove_rohr=False,dtype=None):\n if not isinstance(datafile,str) and not isinstance(datafile, unicode):\n self.D = datafile\n return\n self.info = WurzelInfo(datafile)\n info = self.info\n try:\n os.mkdir(os.path.join(info.datapath, datafile.replace(\".dat\",\"\")))\n except OSError:pass\n if not info.has_rohr: remove_rohr = False\n if not info.has_rohr: medianfilt = False\n\n picklename = os.path.join(info.datapath, datafile.replace(\".dat\",\"\"), \"upsampled.pickle\")\n #if usepickled and os.path.exists(picklename):\n if os.path.exists(picklename):\n self.load(picklename)\n if not all([x==y for x,y in zip(self.D.shape, info.shape )]):\n print \"After loading pickle, dimensions do not match: \", self.D.shape, info.shape\n import sys\n sys.exit(1)\n else:\n try:\n with open(os.path.join(info.datapath, datafile)) as fd:\n self.D = np.fromfile(file=fd, dtype=info.read_dtype).reshape(info.read_shape).astype(\"float32\")\n except:\n with open(os.path.join(info.datapath, datafile)) as fd:\n self.D = np.fromfile(file=fd, dtype=dtype).reshape(info.shape).astype(\"float32\")\n if info.read_dtype in [np.uint8, \"uint8\"]:\n self.D /= 255.0\n if medianfilt: self.median_filter()\n if remove_rohr: self.get_rid_of_roehrchen()\n #assert self.D.min()>= 0\n self.D[self.D<0]=0\n self.upsample(upsample)\n if not medianfilt:\n cnt = (self.D<0).sum()\n print \"fraction below zero: \", cnt/np.prod(self.D.shape)\n self.D[self.D<0]=0 # this is an upsampling-artefact (hopefully)\n if not all([x==y for x,y in zip(self.D.shape, info.shape )]):\n print \"After resampling, dimensions do not match: \", self.D.shape, info.shape\n import sys\n sys.exit(1)\n\n if medianfilt or remove_rohr or upsample:\n print \"Saving upsampled as \", picklename\n self.save(picklename)", "def read_meta(self):\n meta = cPickle.load(open('../sugar_analysis_data/META-CABALLO2.pkl'))\n self.meta_sn_name_list = []\n self.meta_zcmb = []\n self.meta_x0 =[]\n self.meta_x0_err = []\n self.meta_x1 =[]\n self.meta_x1_err = []\n self.meta_c = []\n self.meta_c_err = []\n self.meta_mb = []\n self.meta_mb_err = []\n self.meta_cov_x0_x1 = [] \n self.meta_cov_x0_c = []\n self.meta_cov_x1_c = []\n self.meta_cov_mb_x1 = []\n self.meta_cov_mb_c = [] 
\n self.meta_zhl = []\n self.meta_zhl_err = []\n self.meta_idr = []\n for meta_sn_name in meta.keys(): \n \n if meta[meta_sn_name]['idr.subset'] != 'bad' and meta[meta_sn_name]['idr.subset'] != 'auxiliary':\n \n self.meta_sn_name_list.append(meta_sn_name)\n self.meta_zhl_err.append(meta[meta_sn_name]['host.zhelio.err'])\n self.meta_zhl.append(meta[meta_sn_name]['host.zhelio'])\n self.meta_zcmb.append(meta[meta_sn_name]['host.zcmb'])\n self.meta_x0.append(meta[meta_sn_name]['salt2.X0'])\n self.meta_x0_err.append(meta[meta_sn_name]['salt2.X0.err'])\n self.meta_x1.append(meta[meta_sn_name]['salt2.X1'])\n self.meta_x1_err.append(meta[meta_sn_name]['salt2.X1.err'])\n self.meta_c.append(meta[meta_sn_name]['salt2.Color'])\n self.meta_c_err.append(meta[meta_sn_name]['salt2.Color.err'])\n self.meta_mb.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B'])\n self.meta_mb_err.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B.err'])\n self.meta_cov_x0_x1.append(meta[meta_sn_name]['salt2.CovX0X1'])\n self.meta_cov_x0_c.append(meta[meta_sn_name]['salt2.CovColorX0'])\n self.meta_cov_x1_c.append(meta[meta_sn_name]['salt2.CovColorX1'])\n self.meta_cov_mb_x1.append(meta[meta_sn_name]['salt2.CovRestFrameMag_0_BX1'])\n self.meta_cov_mb_c.append(meta[meta_sn_name]['salt2.CovColorRestFrameMag_0_B'])\n self.meta_idr.append(meta[meta_sn_name]['idr.subset'])\n \n self.meta_idr = np.array(self.meta_idr)\n self.meta_zcmb = np.array(self.meta_zcmb)\n self.meta_zhl = np.array(self.meta_zhl)\n self.meta_zhl_err = np.array(self.meta_zhl_err)\n self.meta_x0 = np.array(self.meta_x0)\n self.meta_x0_err = np.array(self.meta_x0_err)\n self.meta_x1 = np.array(self.meta_x1)\n self.meta_x1_err = np.array(self.meta_x1_err) \n self.meta_c = np.array(self.meta_c)\n self.meta_c_err = np.array(self.meta_c_err)\n self.meta_mb = np.array(self.meta_mb)\n self.meta_mb_err = np.array(self.meta_mb_err)\n self.meta_cov_x0_x1 = np.array(self.meta_cov_x0_x1)\n self.meta_cov_x0_c = np.array(self.meta_cov_x0_c)\n self.meta_cov_x1_c = np.array(self.meta_cov_x1_c)\n self.meta_cov_mb_x1 = np.array(self.meta_cov_mb_x1)\n self.meta_cov_mb_c = np.array(self.meta_cov_mb_c)", "def load_data(self):", "def read_mb_file(self,idir='.',ifile=None, gmt=True, verbose=False):\n \n import numpy as np\n import os\n \n if gmt==True:\n gmt_file=idir+'/../maps/en_velo.gmt'\n if isinstance(gmt,str):\n gmt_file=gmt\n \n if gmt != False:\n self.read_lon_lat(gmt_file,verbose=verbose)\n \n if ifile is None:\n mb_file_basename= idir + '/mb_'+self.code+'_GPS.dat'\n else:\n mb_file_basename=ifile\n \n data_NEU = []\n for i in range(1,4):\n mb_file = mb_file_basename + str(i)\n\n # file\n self.ifile=os.path.abspath(mb_file)\n \n data=np.genfromtxt(mb_file,skip_header=4)\n \n # reshape to ensure a 2D array\n if len(data.shape)==1:\n data=data.reshape((1,data.shape[0]))\n \n\n\n data_NEU.append(data)\n\n if data_NEU[0].shape == data_NEU[1].shape == data_NEU[2].shape:\n self.data=np.zeros((data_NEU[0].shape[0],7))\n self.data[:,0]=data_NEU[0][:,0]\n self.data[:,1]=data_NEU[0][:,1]#*to_mm\n self.data[:,2]=data_NEU[1][:,1]#*to_mm\n self.data[:,3]=data_NEU[2][:,1]#*to_mm\n\n self.data[:,4]=data_NEU[0][:,2]#*to_mm\n self.data[:,5]=data_NEU[1][:,2]#*to_mm\n self.data[:,6]=data_NEU[2][:,2]#*to_mm\n\n else: \n print(\"!!! 
Error reading \",mb_file_basename,\" :*dat1, *dat2, *dat3 do not have the same length\")\n self.data = None", "def updateHeaderComputedValues( self ):\n self.nAvgBytesPerSec = int( self.nNbrChannel*self.nSamplingRate*self.nNbrBitsPerSample/8 )\n self.nSizeBlockAlign = int( self.nNbrChannel*self.nNbrBitsPerSample/8 )\n self.dataType = Wav.getDataType( self.nNbrBitsPerSample )", "def read_data_small(filename):\n with bz2.BZ2File(filename) as f:\n data = []\n file_size = os.stat(filename).st_size\n chunk_size = 1024 * 1024 # 限制读取的数据\n print('Reading data...')\n for i in range(int(ceil(file_size // chunk_size) + 1)):\n bytes_to_read = min(chunk_size, file_size - (i * chunk_size))\n file_string = f.read(bytes_to_read).decode('utf-8')\n file_string = file_string.lower()\n file_string = nltk.word_tokenize(file_string) # nltk 提供的分词器\n data.extend(file_string)\n return data", "def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'", "def __init__(self, path, **kwargs):\n super().__init__(path, **kwargs)\n\n self._largeImagePath = self._getLargeImagePath()\n self._lastOpenSourceLock = threading.RLock()\n # 'c' must be first as channels are special because they can have names\n self._axesList = ['c', 'z', 't', 'xy']\n if not os.path.isfile(self._largeImagePath):\n try:\n possibleYaml = self._largeImagePath.split('multi://', 1)[-1]\n self._info = yaml.safe_load(possibleYaml)\n self._validator.validate(self._info)\n self._basePath = Path('.')\n except Exception:\n raise TileSourceFileNotFoundError(self._largeImagePath) from None\n else:\n try:\n with builtins.open(self._largeImagePath) as fptr:\n start = fptr.read(1024).strip()\n if start[:1] not in ('{', '#', '-') and (start[:1] < 'a' or start[:1] > 'z'):\n msg = 'File cannot be opened via multi-source reader.'\n raise TileSourceError(msg)\n fptr.seek(0)\n try:\n import orjson\n self._info = orjson.loads(fptr.read())\n except Exception:\n fptr.seek(0)\n self._info = yaml.safe_load(fptr)\n except (json.JSONDecodeError, yaml.YAMLError, UnicodeDecodeError):\n msg = 'File cannot be opened via multi-source reader.'\n raise TileSourceError(msg)\n try:\n self._validator.validate(self._info)\n except jsonschema.ValidationError:\n msg = 'File cannot be validated via multi-source reader.'\n raise TileSourceError(msg)\n self._basePath = Path(self._largeImagePath).parent\n self._basePath /= Path(self._info.get('basePath', '.'))\n for axis in self._info.get('axes', []):\n if axis not in self._axesList:\n self._axesList.append(axis)\n self._collectFrames()", "def read_data(self, f):\n\n f.seek(self.offset)\n # assume files are small enough to fit in memory\n data = f.read(self.compressed_size)\n if self.type == 0:\n return data\n elif self.type == 1:\n return gzip.decompress(data)\n elif self.type == 2:\n n, = struct.unpack('<L', data[:4])\n target = data[4:4+n].rstrip(b'\\0').decode('utf-8')\n logger.debug(f\"file redirection: {target}\")\n return None\n elif self.type == 3:\n return zstd_decompress(data)\n raise ValueError(f\"unsupported file type: {self.type}\")", "def analyze_pressure_dump(filename, Lx=200., Ly=200, Lz=900., N=10, 
bin_divide_flag=False, Natoms=113579):\n myfile = open(filename+'.txt')\n trajectory = []\n traj_pd = []\n frames = []\n\n for _ in range(3):\n next(myfile)\n count = 0\n while EOF(myfile):\n count += 1\n s = next(myfile) # info with the time step\n\n x = np.zeros(N, dtype=[('Chunk',np.float32), ('Coord1',np.float32), ('Ncount',np.float32), ('density',np.float32), ('temp',np.float32), ('vx',np.float32), ('fx',np.float32),('c_pciKE[1]',np.float32), ('c_pciKE[2]',np.float32), ('c_pciKE[3]',np.float32), ('c_pciVIR[1]',np.float32), ('c_pciVIR[2]',np.float32), ('c_pciVIR[3]',np.float32), ('c_pgelELAS[1]',np.float32), ('c_pgelELAS[2]',np.float32), ('c_pgelELAS[3]',np.float32), ('c_pgelVIR[1]', np.float32), ('c_pgelVIR[2]', np.float32), ('c_pgelVIR[3]', np.float32), ('c_pgelPAIR[1]', np.float32), ('c_pgelPAIR[2]', np.float32), ('c_pgelPAIR[3]', np.float32)])\n\n# Chunk Coord1 Ncount density/number temp vx fx c_pciKE[1] c_pciKE[2] c_pciKE[3] c_pciVIR[1] c_pciVIR[2] c_pciVIR[3] c_pgelELAS[1] c_pgelELAS[2] c_pgelELAS[3] c_pgelVIR[1] c_pgelVIR[2] c_pgelVIR[3] c_pgelPAIR[1] c_pgelPAIR[2] c_pgelPAIR[3]\n\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n frame, _, _ = list_line\n frames.append(int(frame))\n # print( \"reading lines\")\n\n for i in xrange(N):\n count += 1\n s = next(myfile)\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n # print( \"reading line\", i, list_line)\n for il, l in enumerate(list_line):\n x[i][il] = float(l)\n\n trajectory.append(x)\n\n # names = x.dtype.fields.keys()\n # data = x.dtype.fields.values()\n\n df = pd.DataFrame.from_records(x)\n traj_pd.append(df)\n\n myfile.close()\n\n\n\n # # volume = 218.*44.*44.\n volume = Lx*Ly*Lz\n # N_atoms = 113579\n # if bin_divide_flag:\n # bin_volume = volume / float(N)\n # else:\n # bin_volume = 1.\n\n bin_volume = volume / float(N)\n # bin_volume = volume\n # bin_volume /= float(Natoms)\n\n Combine_PD = pd.concat(traj_pd)\n FINAL_PD = pd.DataFrame()\n\n FINAL_PD['Coord1'] = Combine_PD['Coord1']\n FINAL_PD['p_ciKE'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciKE[1]'] + Combine_PD['c_pciKE[2]'] + Combine_PD['c_pciKE[3]'])/(3.*bin_volume)\n FINAL_PD['p_ciVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciVIR[1]'] + Combine_PD['c_pciVIR[2]'] + Combine_PD['c_pciVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelELAS'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelELAS[1]'] + Combine_PD['c_pgelELAS[2]'] + Combine_PD['c_pgelELAS[3]'])/(3.*bin_volume)\n\n FINAL_PD['p_gelVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelVIR[1]'] + Combine_PD['c_pgelVIR[2]'] + Combine_PD['c_pgelVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelPAIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelPAIR[1]'] + Combine_PD['c_pgelPAIR[2]'] + Combine_PD['c_pgelPAIR[3]'])/(3.*bin_volume)\n\n # So now I have to\n # P_bin = (sigma_per_atom_xx + ... 
+ sigma_per_atom_zz)/(bin_volume*3)\n # *N_atoms_per_bin\n # N_atoms_per_bin = number_density*N_atoms\n\n\n df_concat = FINAL_PD\n\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n by_row_index_2 = df_concat.groupby(df_concat.index)\n df_stds = by_row_index_2.std()\n\n # print( df_means.head())\n # print( df_stds.head())\n return df_means, df_stds", "def __init__(self, resolver_context, path_spec):\n super(CompressedStreamFileSystem, self).__init__(\n resolver_context, path_spec)\n self._compression_method = None", "def runTest(self):\n nc = Dataset(self.file)\n data = nc['vl'][-1]\n # check max error of compression\n err = np.abs(data - self.data)\n assert(err.max() < nc['vl'].scale_factor)\n # turn off auto-scaling\n nc.set_auto_maskandscale(False)\n data = nc['vl'][-1]\n assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))\n nc.close()", "def test_large_import(self):\n # the original file (from the IDD) is a previous version of the file from\n # the data server for the gp03flmb platform\n self.create_sample_data_set_dir('node59p1_orig.dat', TELEM_DIR, 'node59p1.dat')\n self.assert_initialize()\n # one bad sample in here:\n # PH1236501_01D5u51F361E0_EC_162E has non ascii bytes at the end and is missing \\r\n result = self.data_subscribers.get_samples(DataParticleType.CONTROL, 1, 60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 49, 100)\n\n # this file is the more recent file off the data server for gp03flmb/d00001\n # this file appends more data to that in node59p1_orig\n self.create_sample_data_set_dir('node59p1.dat', TELEM_DIR)\n # several bad samples in here:\n # PH1236501_01D5u521208B4_A1_D274 doesn't have enough bytes (469 not 470)\n # PH1236501_01D5u52461BDC_CF_55BD doesn't have enough bytes (469 not 470)\n # PH1236501_01D5u5266BCF1_DA_6466 doesn't have enough bytes (469 not 470)\n # PH1236501_01DAu5288AF85_C9_7365, PH1236501_01DAu529E1BDF_42_4835\n # have extra bytes after the sample, not an error anymore\n # PH1236501_01D5u52B090DA_BA_8CC1 doesn't have enough bytes (469 not 470)\n # PH1236501_01DAu52B38839_BB_4134, PH1236501_01DAu52C8F493_34_3FC2\n # PH1236501_01DAu52ECE16B_79_F727, PH1236501_01DAu53024DC6_F2_7EC9 \n # have extra bytes after sample, not an error anymore\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 751, 430)", "def __init__(self, fpath, prefix, suffix, globar_year=None):\n self.fpath = fpath\n prefix_len = len(prefix)\n suffix_len = len(suffix)\n self.year = globar_year\n self.year_str = str(globar_year)\n\n # filename example : good_2005_0.pgz\n files = [f for f in listdir(fpath) if isfile(join(fpath, f)) and\n (f[-suffix_len:] == suffix and f[:prefix_len] == prefix)]\n\n pat = '{0}_{1}'.format(prefix, self.year_str)\n last_batch = \\\n list(sorted(filter(lambda f: pat in f, files)))\n first_batch = \\\n list(sorted(filter(lambda f: pat not in f, files)))\n\n logging.info('in ChunkReader.__init__() : '\n '(last_batch) {0} files : {1}'.format(len(last_batch), ' '.join(last_batch)))\n logging.info('in ChunkReader.__init__() : '\n '(first_batch) {0} files : {1}'.format(len(first_batch), ' '.join(first_batch)))\n\n first_batch.extend(last_batch)\n # queue-like usage\n self.files = first_batch[::-1]\n logging.info('in ChunkReader.__init__ : '\n 'all files {0} files : {1}'.format(len(self.files), ' '.join(self.files)))", "def parse_file(self, path, max_resolution, threshold, proteins={}):\n\n \"\"\"\n create regex pattern here so it is not done repeatedly while parsing 
file\n\n groups:\n 0 - Protein ID\n 1 - Chain ID\n 2 - Length of protein chain\n 3 - Exptl.\n 4 - Resolution\n 5 - R-factor\n 6 - FreeRValue\n \"\"\"\n regex_str = '(\\w{4})(\\w)\\s+(\\d+)\\s+(\\w+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)'\n regex_pattern = re.compile(regex_str)\n\n printc('Processing: %s' % path)\n\n raw = None\n try:\n _file = gzip.open(path, 'r')\n\n #first line is labels, discard it\n _file.readline()\n\n for line in _file:\n match = regex_pattern.match(line)\n if match:\n groups = match.groups()\n\n if groups[0] in proteins:\n # if protein already exists just update the additional\n # chain information. The properties should not change\n # between records in the selection file.\n protein = proteins[groups[0]]\n if not groups[1] in protein['chains']:\n protein['chains'].append(groups[1])\n #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold)\n\n else:\n # protein is not in proteins dict yet create initial\n # structure from parsed properties.\n resolution = float(groups[4])\n if resolution > 0 and resolution <= max_resolution:\n proteins[groups[0]] = {\n 'code':groups[0],\n 'chains':[groups[1]],\n 'resolution':groups[4],\n 'rfactor':groups[5],\n 'rfree':groups[6],\n 'threshold':threshold\n }\n\n #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold)\n\n finally:\n if _file:\n _file.close()\n\n return proteins", "def __init__(self, input_stream, threads=DEFAULT_THREADS, level=9):\n super(Pigz, self).__init__(\n input_stream,\n program=\"pigz\",\n threads=threads,\n level=level,\n suffix=\".gz\",\n )", "def process_raw_data_aeris(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import os\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n from datetime import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n xMinCarSpeed = -10\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't 
remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n removeOut = xOut + xCar + \"_\" + xdat + \"_removed.csv\"\n fnLog = xOut + xCar + \"_\" + xdat + \".log\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(2).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fLog.write(\"Processing file: \" + str(xFilename) + \"\\n\")\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n woo = row\n bGood = True\n if xCntObs != -1:\n lstS = row.split(\",\")\n if float(lstS[2]) < 20:\n bGood = False\n xCntObs += 1\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # seconds = fdate.strftime('%s.%f')\n seconds = dt_to_epoch(fdate)\n\n def getNS(seconds):\n ns = str(float(seconds) * 1e-3)[11:]\n # str(pd.to_numeric(str(float(seconds) * 1e-3)[11:]) * 100000)[:9]\n return (str(ns).ljust(15, '0'))[:9]\n\n if len(lstS) > 6 and float(lstS[2]) > 20:\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(\n dateob.strftime('%H:%M:%S')) + ',' + str(str(float(seconds) * 1e-3)[:10]) + ',' + getNS(\n seconds) + str(',')\n csvWrite += str(lstS[20]) + ',' + str(lstS[15]) + ',' + str(lstS[16]) + ',' + str(\n lstS[17]) + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(\n lstS[3]) + ',' + str(lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(\n ',') + str(lstS[14]) + '\\n'\n fOut.write(csvWrite)\n xCntObs += 1\n fLog.write(\"Imported \" + str(xCntObs) + \" lines\" + \"\\n\")\n\n infOut.write(str(xFilename) + '\\n')\n fOut.close()\n # fLog.close()\n infOut.close()\n # print(xCar + \"\\t\" + xdat + \"\\t\" + fnOut[-22:] + \"\\t\" + str(xCntObs) + \"\\t\" + str(xCntGoodValues) + \"\\t\" + str(\n # gZIP))\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df_not_null = wind_df.loc[wind_df['LAT'].notnull(),].reset_index(drop=True)\n wind_df_null = wind_df.loc[~wind_df['LAT'].notnull(),].reset_index(drop=True)\n if wind_df_null.shape[0] 
> 0:\n wind_df_null=wind_df_null.assign(Reason='GPS NA')\n\n del (wind_df)\n wind_df = wind_df_not_null.copy()\n\n radians = False\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n #wind_df['VELOCITY_calc'] = wind_df.apply(lambda row: row['distance']/row['timediff'],axis=1)\n wind_df['VELOCITY_calc'] = wind_df.apply(lambda row:calc_velocity(row['timediff'], row['distance']),axis=1)\n\n wind_df['VELOCITY'] = wind_df.apply(lambda x: (str(x.VELOCITY)), axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: 0 if x.VELOCITY == 'XX.X' else x.VELOCITY, axis=1)\n wind_df['fVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[0]\n wind_df = wind_df.loc[wind_df['fVel'].notnull(),].reset_index(drop=True)\n wind_df['firstVel'] = wind_df.apply(lambda x: int(x['fVel']), axis=1)\n\n wind_df['sVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[1]\n wind_df = wind_df.loc[wind_df['sVel'].notnull(),].reset_index(drop=True)\n wind_df['secVel'] = wind_df.apply(lambda x: int(x['sVel']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstVel) + '.' + str(x.secVel)), axis=1)\n wind_df2 = wind_df.drop(columns=['VELOCITY', 'secVel', 'sVel', 'fVel', 'firstVel'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'VELOCITY'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n ## CORRECT W WIND THING\n wind_df['W'] = wind_df.apply(lambda x: (str(x.W)), axis=1)\n wind_df['W'] = wind_df.apply(lambda x: 0 if x.W == 'XX.X' else x.W, axis=1)\n wind_df['fW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[0]\n # wind_df = wind_df.loc[wind_df['fW'].notnull(),].reset_index(drop=True)\n wind_df['firstW'] = wind_df.apply(lambda x: int(x['fW']), axis=1)\n wind_df['sW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[1]\n # wind_df = wind_df.loc[wind_df['sW'].notnull(),].reset_index(drop=True)\n wind_df['secW'] = wind_df.apply(lambda x: int(x['sW']), axis=1)\n wind_df['wloc'] = wind_df.apply(lambda x: float(str(x.firstW) + '.' 
+ str(x.secW)), axis=1)\n wind_df2 = wind_df.drop(columns=['W', 'secW', 'sW', 'fW', 'firstW'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'wloc': 'W'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT U WIND THING\n wind_df['U'] = wind_df.apply(lambda x: (str(x.U)), axis=1)\n wind_df['U'] = wind_df.apply(lambda x: 0 if x.U == 'XX.X' else x.U, axis=1)\n wind_df['fU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstU'] = wind_df.apply(lambda x: int(x['fU']), axis=1)\n wind_df['sU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secU'] = wind_df.apply(lambda x: int(x['sU']), axis=1)\n wind_df['uloc'] = wind_df.apply(lambda x: float(str(x.firstU) + '.' + str(x.secU)), axis=1)\n wind_df2 = wind_df.drop(columns=['U', 'secU', 'sU', 'fU', 'firstU'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'uloc': 'U'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT V WIND THING\n wind_df['V'] = wind_df.apply(lambda x: (str(x.V)), axis=1)\n wind_df['V'] = wind_df.apply(lambda x: 0 if x.V == 'XX.X' else x.V, axis=1)\n wind_df['fV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstV'] = wind_df.apply(lambda x: int(x['fV']), axis=1)\n wind_df['sV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secV'] = wind_df.apply(lambda x: int(x['sV']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstV) + '.' + str(x.secV)), axis=1)\n wind_df2 = wind_df.drop(columns=['V', 'secV', 'sV', 'fV', 'firstV'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'V'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY_calc']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n\n wind_df['adj_v'] = wind_df.apply(lambda row: -row['horz_length'] * np.cos(row['adj_theta']), axis=1)\n wind_df['adj_u'] = wind_df.apply(lambda row: row['horz_length'] * np.sin(row['adj_theta']), axis=1)\n\n ## GO THROUGH WIND\n window_size = 30\n u_series = pd.Series(wind_df['adj_u'])\n u_windows = u_series.rolling(window_size)\n u_averages = pd.DataFrame(u_windows.mean())\n u_averages.columns = ['U_avg']\n u_averages['key'] = u_averages.index\n\n v_series = pd.Series(wind_df['adj_v'])\n v_windows = v_series.rolling(window_size)\n v_averages = pd.DataFrame(v_windows.mean())\n v_averages.columns = ['V_avg']\n v_averages['key'] = v_averages.index\n\n w_series = pd.Series(wind_df['W'])\n w_windows = w_series.rolling(window_size)\n w_averages = pd.DataFrame(w_windows.mean())\n w_averages.columns = ['W_avg']\n w_averages['key'] = w_averages.index\n\n vw_df = w_averages.set_index('key').join(v_averages.set_index('key'))\n vw_df['key'] = vw_df.index\n uvw_df = vw_df.set_index('key').join(u_averages.set_index('key'))\n uvw_df['key'] = uvw_df.index\n wind_df2 = wind_df.copy()\n wind_df2['key'] = wind_df2.index\n wind_df = uvw_df.set_index('key').join(wind_df2.set_index('key'))\n\n wind_df['r_avg'] = wind_df.apply(lambda row: 
np.sqrt(row['U_avg'] ** 2 + row['V_avg'] ** 2), axis=1)\n wind_df['theta_avg'] = wind_df.apply(lambda row: np.arctan(-row['U_avg'] / row['V_avg']), axis=1)\n\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df['shift_R'] = wind_df.R.shift(periods=int(float(shift)))\n wind_df['raw_R'] = wind_df.apply(lambda row: row['R'], axis=1)\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df2_null = wind_df[~wind_df.CH4.notnull()]\n if wind_df2_null.shape[0] > 0:\n wind_df2_null=wind_df2_null.assign(Reason='GPS NA')\n nullCH4 = pd.concat([wind_df_null,wind_df2_null])\n\n\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4', 'R','VELOCITY'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3['R'] = wind_df3.loc[:, 'shift_R']\n wind_df3 = wind_df3.drop(['shift_CH4', 'shift_R'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df3a = wind_df3.copy().rename(columns = {'VELOCITY_calc':'VELOCITY'})\n wind_df4 = wind_df3a.loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind',\n 'phi', 'raw_CH4', 'raw_R', 'U_avg', 'V_avg', 'W_avg', 'r_avg', 'theta_avg', 'distance', 'odometer']]\n\n # wind_df7 = add_odometer(wind_df4,'LAT','LONG')\n\n # wind_df4 = wind_df7.copy()\n #wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > -1, :]\n\n wrongSpeed = wind_df4.loc[wind_df4.VELOCITY <= xMinCarSpeed,:]\n wrongSpeed=wrongSpeed.assign(Reason='velocity too slow')\n\n #wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < 1000, :]\n\n wrongSpeed2 = wind_df5.loc[wind_df5.VELOCITY >= xMaxCarSpeed, :]\n wrongSpeed2 = wrongSpeed2.assign(Reason='velocity too fast')\n\n wrongSpeeds = pd.concat([wrongSpeed,wrongSpeed2])\n\n\n notGood = pd.concat([wrongSpeeds,nullCH4])\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n\n nullCH4 = wind_df4.loc[~wind_df4.CH4.notnull(), :]\n if nullCH4.shape[0] > 0:\n nullCH4 = nullCH4.assign(Reason='CH4 NA')\n removedDF = pd.concat([notGood,nullCH4])\n if nullCH4.shape[0]==0:\n removedDF = notGood\n wind_df4 = wind_df5.copy()\n\n ## if you want to filter out high temperatures\n #wind_df4 = wind_df5.loc[wind_df5.TEMPC < 95, :].reset_index(drop=True)\n\n fLog.write(\"Usable lines - \" + str(wind_df4.shape[0]) + \".\" + \"\\n\")\n fLog.close()\n\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n removedDF.to_csv(removeOut,index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n removed = pd.read_csv(removeOut)\n pd.concat([removed, removedDF]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(removeOut, index=False)\n\n 
os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def __init__(self,filePath,headerSymbols=['@','+']):\n if filePath.endswith('.gz'):\n self._file = gzip.open(filePath)\n else:\n self._file = open(filePath, 'rU')\n self._currentLineNumber = 0\n self._hdSyms = headerSymbols", "def __init__(self,filePath,headerSymbols=['@','+']):\n if filePath.endswith('.gz'):\n self._file = gzip.open(filePath)\n else:\n self._file = open(filePath, 'rU')\n self._currentLineNumber = 0\n self._hdSyms = headerSymbols", "def refreshDataSizeCrc(self):\n if isinstance(self,InstallerArchive):\n archiveRoot = GPath(self.archive).sroot\n else:\n archiveRoot = self.archive\n reReadMe = self.reReadMe\n docExts = self.docExts\n docDirs = self.docDirs\n dataDirsPlus = self.dataDirsPlus\n dataDirsMinus = self.dataDirsMinus\n skipExts = self.skipExts\n bethFiles = bush.bethDataFiles\n packageFiles = set(('package.txt','package.jpg'))\n unSize = 0\n espmNots = self.espmNots\n skipVoices = self.skipVoices\n off_local = self.off_local\n if espmNots and not skipVoices:\n skipEspmVoices = set(x.cs for x in espmNots)\n else:\n skipEspmVoices = None\n skipDistantLOD = settings['bash.installers.skipDistantLOD']\n hasExtraData = self.hasExtraData\n type = self.type\n if type == 2:\n allSubs = set(self.subNames[1:])\n activeSubs = set(x for x,y in zip(self.subNames[1:],self.subActives[1:]) if y)\n #--Init to empty\n self.readMe = self.packageDoc = self.packagePic = None\n for attr in ('skipExtFiles','skipDirFiles','espms'):\n object.__getattribute__(self,attr).clear()\n data_sizeCrc = {}\n skipExtFiles = self.skipExtFiles\n skipDirFiles = self.skipDirFiles\n espms = self.espms\n dest_src = {}\n #--Bad archive?\n if type not in (1,2): return dest_src\n #--Scan over fileSizeCrcs\n for full,size,crc in self.fileSizeCrcs:\n file = full #--Default\n if type == 2: #--Complex archive\n subFile = full.split('\\\\',1)\n if len(subFile) == 2:\n sub,file = subFile\n if sub not in activeSubs:\n if sub not in allSubs:\n skipDirFiles.add(file)\n continue\n rootPos = file.find('\\\\')\n extPos = file.rfind('.')\n fileLower = file.lower()\n rootLower = (rootPos > 0 and fileLower[:rootPos]) or ''\n fileExt = (extPos > 0 and fileLower[extPos:]) or ''\n #--Skip file?\n if (rootLower == 'omod conversion data' or \n fileLower[-9:] == 'thumbs.db' or fileLower[-11:] == 'desktop.ini'):\n continue #--Silent skip\n elif skipDistantLOD and fileLower[:10] == 'distantlod':\n continue\n elif skipVoices and fileLower[:11] == 'sound\\\\voice':\n continue\n elif file in bethFiles:\n skipDirFiles.add(full)\n continue\n elif not hasExtraData and rootLower and rootLower not in dataDirsPlus:\n skipDirFiles.add(full)\n continue\n elif hasExtraData and rootLower and rootLower in dataDirsMinus:\n skipDirFiles.add(full)\n continue\n elif fileExt in skipExts:\n skipExtFiles.add(full)\n continue\n #--Remap (and/or skip)\n dest = file #--Default. 
May be remapped below.\n #--Esps\n if not rootLower and reModExt.match(fileExt):\n pFile = pDest = GPath(file)\n if pFile in off_local:\n pDest = off_local[pFile]\n dest = pDest.s\n espms.add(pDest)\n if pDest in espmNots: continue\n #--Esp related voices (Oblivion)\n elif skipEspmVoices and fileLower[:12] == 'sound\\\\voice\\\\':\n farPos = file.find('\\\\',12)\n if farPos > 12 and fileLower[12:farPos] in skipEspmVoices:\n continue\n #--Docs\n elif rootLower in docDirs:\n dest = 'Docs\\\\'+file[rootPos+1:]\n elif not rootLower:\n maReadMe = reReadMe.match(file)\n if file.lower() == 'masterlist.txt':\n pass\n elif maReadMe:\n if not (maReadMe.group(1) or maReadMe.group(3)):\n dest = 'Docs\\\\%s%s' % (archiveRoot,fileExt)\n else:\n dest = 'Docs\\\\'+file\n self.readMe = dest\n elif fileLower == 'package.txt':\n dest = self.packageDoc = 'Docs\\\\'+archiveRoot+'.package.txt'\n elif fileLower == 'package.jpg':\n dest = self.packagePic = 'Docs\\\\'+archiveRoot+'.package.jpg'\n elif fileExt in docExts:\n dest = 'Docs\\\\'+file\n #--Save\n key = GPath(dest)\n data_sizeCrc[key] = (size,crc)\n dest_src[key] = full\n unSize += size\n self.unSize = unSize\n (self.data_sizeCrc,old_sizeCrc) = (data_sizeCrc,self.data_sizeCrc)\n #--Update dirty?\n if self.isActive and data_sizeCrc != old_sizeCrc:\n dirty_sizeCrc = self.dirty_sizeCrc\n for file,sizeCrc in old_sizeCrc.iteritems():\n if file not in dirty_sizeCrc and sizeCrc != data_sizeCrc.get(file):\n dirty_sizeCrc[file] = sizeCrc\n #--Done (return dest_src for install operation)\n return dest_src", "def read(files, save):\n\t# NOTE all soundings are size obs long, they must be filled in with zeros for this data format...\n\t# create the HDF5 document\n\tdoc = h5(save)\n\tsize = 450 # this hopefully exceeds the size of the arrays # CPIN Files are much shorter...\n\tdoc.create(pres=size, temp=size, dewpt=size, rh=size, r=size, u=size, v=size, z=size, lat=1, lon=1, theta=size, thte=size,\n\t\twspd=size, wdir=size, gamma=size, stab=size, N=size, rich=size, thtdef=size, cpin=size)\n\t# those last two do not have to be included...\n\t# Z=geopotenital height\n\n\t# now read the files!\n\tfor f in sorted(files):\n\t\tfname = f.split('/')[-1]\n\t\t# if 'smth' not in fname and NCAR not in fname: continue\n\t\tl.info('reading ' + fname)\n\t\t# launch time comes from line 2 of the file, the last element\n\t\tdf = open(f, 'r')\n\t\ttxt = df.read(2000).split('\\n') # way more than we need\n\t\tdf.close()\n\t\tlatln = txt[0].split() # keys 1,2 will be what we want\n\t\ttry:\n\t\t\ttm = s2t(txt[1].split()[-1] + 'UTC', '%Y%m%d%H%M%Z')\n\t\texcept:\n\t\t\t# drat.\n\t\t\tprint txt.split('\\n')[1]\n\t\t\tcontinue\n\t\ttry:\n\t\t\tif 'cpin' in fname:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich, thtdef, cpin = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\t\telse:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\texcept:\n\t\t\tl.warning('This file could not be read')\n\t\t\tcontinue\n\n\t\t# and append this data! 
I will trust the time seconds, instead of recomputing the time\n\t\t# but, before that, we have to make them all the same size - size long\n\t\tnl = np.zeros(size - t.shape[0]) - 999.00 # -999 array to fluff the end\n\t\tp = np.concatenate((p, nl))\n\t\tt = np.concatenate((t, nl))\n\t\ttd = np.concatenate((td, nl))\n\t\trh = np.concatenate((rh, nl))\n\t\tr = np.concatenate((r, nl))\n\t\ttv = np.concatenate((tv, nl))\n\t\ttht = np.concatenate((tht, nl))\n\t\tthte = np.concatenate((thte, nl))\n\t\tws = np.concatenate((ws, nl))\n\t\twd = np.concatenate((wd, nl))\n\t\tgamma = np.concatenate((gamma, nl))\n\t\tstab = np.concatenate((stab, nl))\n\t\tN = np.concatenate((N, nl))\n\t\trich = np.concatenate((rich, nl))\n\t\tu = np.concatenate((u, nl))\n\t\tv = np.concatenate((v, nl))\n\t\tz = np.concatenate((z, nl))\n\t\tif 'cpin' in fname:\n\t\t\tcpin = np.concatenate((cpin, nl))\n\t\t\tthtdef = np.concatenate((thtdef, nl))\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich, cpin=cpin, thtdef=thtdef)\n\t\telse:\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich)\n\tdoc.close()", "def load_data(self) -> None:", "def __init__(self, path):\n self.side_len = None\n self.path = path\n self.src = rasterio.open(self.path)\n self.resolution_scaler = 2 / self.src.res[0]\n self.pad_val = 255 # for empty tiles on the edges of the frame", "def __init__(self, fname):\n f = zopen(fname, \"rt\")\n\n # skip header lines\n for i in range(2):\n f.readline()\n\n # number of atoms included in the file followed by the position of the origin of the volumetric data\n line = f.readline().split()\n self.natoms = int(line[0])\n self.origin = np.array(np.array(list(map(float, line[1:]))))\n\n # The next three lines give the number of voxels along each axis (x, y, z) followed by the axis vector.\n line = f.readline().split()\n self.NX = int(line[0])\n self.X = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n line = f.readline().split()\n self.NY = int(line[0])\n self.Y = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n line = f.readline().split()\n self.NZ = int(line[0])\n self.Z = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n self.voxelVolume = abs(np.dot(np.cross(self.X, self.Y), self.Z))\n self.volume = abs(np.dot(np.cross(self.X.dot(self.NZ), self.Y.dot(self.NY)), self.Z.dot(self.NZ)))\n\n # The last section in the header is one line for each atom consisting of 5 numbers,\n # the first is the atom number, second is charge, the last three are the x,y,z coordinates of the atom center.\n self.sites = []\n for i in range(self.natoms):\n line = f.readline().split()\n self.sites.append(Site(line[0], np.multiply(bohr_to_angstrom, list(map(float, line[2:])))))\n\n self.structure = Structure(\n lattice=[self.X * self.NX, self.Y * self.NY, self.Z * self.NZ],\n species=[s.specie for s in self.sites],\n coords=[s.coords for s in self.sites],\n coords_are_cartesian=True,\n )\n\n # Volumetric data\n self.data = np.zeros((self.NX, self.NY, self.NZ))\n i = 0\n for s in f:\n for v in s.split():\n self.data[\n int(i / (self.NY * self.NZ)),\n int((i / self.NZ) % self.NY),\n int(i % self.NZ),\n ] = float(v)\n i += 1", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = 
Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def read(self, FN, multiplier=None):\n if FN is None:\n raise Exception('File is not defined')\n elif FN.endswith('.dx') or FN.endswith('.dx.gz'):\n data = self._read_dx(FN)\n elif FN.endswith('.nc'):\n data = self._read_nc(FN)\n else:\n raise Exception('File type not supported')\n if multiplier is not None:\n data['origin'] = multiplier * data['origin']\n data['spacing'] = multiplier * data['spacing']\n return data" ]
[ "0.61328614", "0.6049478", "0.5808387", "0.5781122", "0.569807", "0.5556485", "0.5516141", "0.5501835", "0.54966", "0.54787964", "0.5472631", "0.5433721", "0.54056853", "0.5392633", "0.5383074", "0.5370794", "0.53649384", "0.5350947", "0.53504694", "0.5339291", "0.53373736", "0.5334897", "0.532513", "0.5303453", "0.5298207", "0.5277429", "0.52679133", "0.52611446", "0.5246408", "0.524398", "0.524157", "0.52313787", "0.5227547", "0.5224333", "0.52052313", "0.5200573", "0.519111", "0.5189077", "0.51874584", "0.51860315", "0.5183185", "0.51680267", "0.51613134", "0.5159185", "0.5149681", "0.51477337", "0.51408523", "0.51359844", "0.5132276", "0.51271594", "0.5126156", "0.5117522", "0.51141524", "0.51097834", "0.5106516", "0.51063484", "0.51032007", "0.51008296", "0.50928235", "0.5091203", "0.5088889", "0.50888515", "0.5087363", "0.508064", "0.5073282", "0.5063802", "0.5060935", "0.5059772", "0.5057043", "0.5049716", "0.5049716", "0.5049438", "0.50434756", "0.5040507", "0.50359213", "0.50270987", "0.50244856", "0.5022314", "0.50188416", "0.5012658", "0.5010668", "0.5001102", "0.4995", "0.4994662", "0.49935275", "0.4992741", "0.4989582", "0.4989361", "0.4987622", "0.4976721", "0.49714738", "0.4971172", "0.4971172", "0.496948", "0.49676394", "0.49642676", "0.49630243", "0.49629852", "0.49623296", "0.49617645" ]
0.5289991
25
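Each row of this dump pairs a docstring-style query with one positive code document, a list of hard-negative code snippets, their retrieval scores, the positive document's own score, and its rank. For the record that ends here, 25 of the listed negative scores exceed the document's 0.5289991, which is consistent with the rank field of 25 counting the higher-scoring negatives (equivalently, a zero-indexed position in the merged ranking). Below is a minimal sketch of consuming such a row, assuming the preview corresponds to JSON-lines records with exactly these field names — an assumption, since only the rendered preview is shown here:

```python
import json

def load_records(path):
    """Yield one dict per JSONL row of this dump.

    Field names (query, document, negatives, negative_scores,
    document_score, document_rank) are assumed from the preview above.
    """
    with open(path) as fobj:
        for line in fobj:
            row = json.loads(line)
            # pair each negative snippet with its retrieval score
            scored_negatives = list(zip(row["negatives"],
                                        map(float, row["negative_scores"])))
            yield {
                "query": row["query"],
                "positive": row["document"],
                "negatives": scored_negatives,
                "doc_score": float(row["document_score"]),
                "doc_rank": int(row["document_rank"]),
            }
```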
won't load any data yet because the files are gzipped and just reading the header takes 2.6 G and a long time! This means we need to set magzp and scale later when we read
def _make_image_info_des(self, flistname):
    flist=[]
    psfex_flist=[]
    magzp_list=[]
    with open(flistname) as fobj:
        for line in fobj:
            ls = line.split()
            fname = ls[0]
            magzp = float(ls[1])

            magzp_list.append(magzp)
            flist.append(fname)

            psfex_fname = fname.replace('.fits.fz','_psfcat.psf')
            psfex_flist.append(psfex_fname)

    nimage = len(flist)
    magzp = np.array(magzp_list)

    path_len = max([len(f) for f in flist])
    psfex_path_len = max([len(f) for f in psfex_flist])

    try:
        ext_len = len(self['image_ext'])
    except:
        ext_len=None

    extra_dtype = [
        ('psfex_path','U%d' % psfex_path_len),
    ]

    #image_info = meds.util.get_image_info_struct(
    image_info = get_image_info_struct(
        nimage,
        path_len,
        ext_len=ext_len,
        extra_dtype=extra_dtype,
    )
    image_info['position_offset'] = 1
    image_info['image_ext'] = self['image_ext']
    image_info['weight_ext'] = self['weight_ext']

    for i,f in enumerate(flist):
        image_info['image_id'][i] = i
        image_info['image_path'][i] = f
        image_info['weight_path'][i] = f
        image_info['psfex_path'][i] = psfex_flist[i]

    image_info['magzp'] = magzp
    image_info['scale'] = self._get_scale_from_magzp(magzp)
    return image_info
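The document above builds the DES image-info table straight from a flat file list: as the query notes, the images are gzipped `.fits.fz` files whose headers are expensive to read, so the magnitude zeropoint is taken from the second column of the list and the flux scale is derived from it via `self._get_scale_from_magzp`, which is not shown in this record. A minimal standalone sketch of that helper, assuming the conventional zeropoint-to-scale relation against a reference zeropoint (the 30.0 default is an assumption, not taken from this record):

```python
import numpy as np

def get_scale_from_magzp(magzp, magzp_ref=30.0):
    """Flux scale that puts images with zeropoint `magzp` onto a common
    reference zeropoint, using the standard magnitude relation
    10**(0.4*(magzp_ref - magzp)).  magzp_ref=30.0 is an assumed default.
    """
    magzp = np.asarray(magzp, dtype='f8')
    return 10.0 ** (0.4 * (magzp_ref - magzp))

# images with a higher zeropoint (more counts per unit flux) get scaled down
print(get_scale_from_magzp([30.0, 31.0, 29.0]))  # ~ [1.0, 0.398, 2.512]
```

Applied to the `magzp` array built in the loop above, this yields one scale per image, matching the `image_info['scale']` assignment at the end of the function.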
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def parseDec_GZIPLoader(self, data): # Once gziploader is decrypted, parse the 0xA9 byte config\n hardcoded_val = data[0:1]\n flag = data[1:2]\n datfile_size = int.from_bytes(data[2:6], byteorder=\"little\")\n print(\"[+] datfile size: \", datfile_size)\n dllfile_size = int.from_bytes(data[6:10], byteorder=\"little\")\n print(\"[+] dllfile size: \", dllfile_size)\n dirname = self.extractStrFromBuff(data[10:])\n print(\"[+] Directory Name:\", dirname)\n\n count = self.incNulls(data[10 + len(dirname):])\n datname = self.extractStrFromBuff(data[count + 10 + len(dirname):])\n print(\"[+] Dat Name :\", datname)\n\n count = count + 10 + len(dirname) + len(datname)\n datname = datname[1:]\n count2 = self.incNulls(data[count:])\n count = count + count2\n dllname = self.extractStrFromBuff(data[count:])\n print(\"[+] Dll Name: \", dllname)\n count += len(dllname)\n count2 = self.incNulls(data[count:])\n count += count2\n # datfile offset is now 710 bytes in\n datfile_data = data[710:710 + datfile_size]\n dllfile_start = 710 + datfile_size\n dllfile_data = data[dllfile_start:dllfile_start + dllfile_size]\n datfile_b64 = base64.b64encode(datfile_data).decode()\n dllfile_b64 = base64.b64encode(dllfile_data).decode()\n ParsedDict = {\"Directory_Name\": dirname.decode(),\n \"DatFile_Name\": datname.decode(),\n \"DllFile_Name\": dllname.decode(),\n \"DatFile\": datfile_b64,\n \"DllFile\": dllfile_b64}\n\n return ParsedDict", "def load_file(path):\n with open(path, \"rb\") as f: # bsps are binary files\n byte_list = f.read() # stores all bytes in bytes1 variable (named like that to not interfere with builtin names\n header = load_header(byte_list)\n skin_names = [byte_list[header.ofs_skins + 64 * x:header.ofs_skins + 64 * x + 64].decode(\"ascii\", \"ignore\") for x in range(header.num_skins)]\n triangles = load_triangles(byte_list[header.ofs_tris:header.ofs_frames], header)\n frames = load_frames(byte_list[header.ofs_frames:header.ofs_glcmds], header)\n texture_coordinates = load_texture_coordinates(byte_list[header.ofs_st:header.ofs_tris], header)\n gl_commands = load_gl_commands(byte_list[header.ofs_glcmds:header.ofs_end])\n # print(header)\n # print(skin_names)\n # print(triangles)\n # print(frames)\n # print(texture_coordinates)\n for i in range(len(texture_coordinates)):\n texture_coordinates[i].s = texture_coordinates[i].s/header.skinwidth\n texture_coordinates[i].t = texture_coordinates[i].t / header.skinheight\n # print(texture_coordinates)\n # print(header.num_xyz)\n for i_frame in range(len(frames)):\n for i_vert in range((header.num_xyz)):\n frames[i_frame].verts[i_vert].v[0] = frames[i_frame].verts[i_vert].v[0]*frames[i_frame].scale.x+frames[i_frame].translate.x\n frames[i_frame].verts[i_vert].v[1] = frames[i_frame].verts[i_vert].v[1] * frames[i_frame].scale.y + frames[i_frame].translate.y\n frames[i_frame].verts[i_vert].v[2] = frames[i_frame].verts[i_vert].v[2] * frames[i_frame].scale.z + frames[i_frame].translate.z\n model = md2_object(header, skin_names, triangles, frames, texture_coordinates, gl_commands)\n return model", "def 
lazy_read_file(self):\n store = zarr.DirectoryStore(self.fpath)\n z_array = zarr.open(store=store, mode='r')\n self.da_input = da.from_array(z_array)\n self.data = self.da_input\n self.data_dim = self.data.shape\n self.chunk_size = z_array.chunks", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? \n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def process_raw_data(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import pandas as pd\n from datetime import datetime\n import os,gzip,csv,sys\n from numpy import pi\n import numpy as np\n radians = False\n\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename, 'r')\n else:\n f = open(xDir + \"/\" + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process\n # if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n # xdat = str(xFilename[len(xCar)+1:len(xCar) + 9])\n\n # fnOut = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_dat.csv\" #set CSV output for raw data\n # fnLog = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_log.csv\" #output for 
logfile\n\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(1).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]), int(dtime[14:16]),\n int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n # fOut = open(fnOut, 'w')\n # fOut.write(sOutHeader)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # epoch = dateob.strftime('%s.%f')\n # dtime = int(dateob.strftime('%Y%m%d%H%M%S'))\n\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #seconds = fdate.strftime('%s.%f')\n seconds = dt_to_epoch(fdate)\n def getNS(seconds):\n ns = str(float(seconds) * 1e-3)[11:]\n #str(pd.to_numeric(str(float(seconds) * 1e-3)[11:]) * 100000)[:9]\n return (str(ns).ljust(15, '0'))[:9]\n\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n str(float(seconds)*1e-3)[:10]) + ',' + getNS(seconds) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(',') + str(\n lstS[14])\n\n if float(seconds) >= (float(firsttime) + (60 * float(initialTimeBack))):\n fOut.write(csvWrite)\n del (seconds)\n del (csvWrite)\n xCntObs += 1\n # sOut = str(gZIP) + \",\" + str(f) + \",\" + str(xCntObs) + \",\" + str(xCntGoodValues) + \"\\n\"\n # fLog.write(sOut)\n infOut.write(str(xFilename) + '\\n')\n fOut.close()\n fLog.close()\n infOut.close()\n\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df['shift_R'] = wind_df.R.shift(periods=int(float(shift)))\n wind_df['raw_R'] = wind_df.apply(lambda row: row['R'], axis=1)\n wind_df_not_null = wind_df.loc[wind_df['LAT'].notnull(),].reset_index(drop=True)\n del (wind_df)\n wind_df = wind_df_not_null.copy()\n\n radians = False\n wind_df['QUADRANT'] = 
wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY_calc'] = wind_df.apply(lambda row:calc_velocity(row['timediff'],row['distance']),axis=1)\n wind_df = wind_df.drop(columns = ['VELOCITY'])\n wind_df = wind_df.rename(columns = {'VELOCITY_calc':'VELOCITY'})\n\n wind_df['VELOCITY'] = wind_df.apply(lambda x: (str(x.VELOCITY)), axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: 0 if x.VELOCITY == 'XX.X' else x.VELOCITY, axis=1)\n wind_df['fVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[0]\n wind_df = wind_df.loc[wind_df['fVel'].notnull(),:].reset_index(drop=True)\n wind_df = wind_df.loc[wind_df['fVel'] != 'nan',:].reset_index(drop=True)\n wind_df['firstVel'] = wind_df.apply(lambda x: int(x['fVel']), axis=1)\n\n wind_df['sVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[1]\n wind_df = wind_df.loc[wind_df['sVel'].notnull(),].reset_index(drop=True)\n wind_df['secVel'] = wind_df.apply(lambda x: int(x['sVel']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstVel) + '.' + str(x.secVel)), axis=1)\n wind_df2 = wind_df.drop(columns=['VELOCITY', 'secVel', 'sVel', 'fVel', 'firstVel'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'VELOCITY'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n ## CORRECT W WIND THING\n wind_df['W'] = wind_df.apply(lambda x: (str(x.W)), axis=1)\n wind_df['W'] = wind_df.apply(lambda x: 0 if x.W == 'XX.X' else x.W, axis=1)\n wind_df['W'] = wind_df.apply(lambda x: '0.0' if x.W == '0' else x.W, axis = 1)\n wind_df['fW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[0]\n # wind_df = wind_df.loc[wind_df['fW'].notnull(),].reset_index(drop=True)\n wind_df['firstW'] = wind_df.apply(lambda x: int(x['fW']), axis=1)\n wind_df['sW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[1]\n # wind_df = wind_df.loc[wind_df['sW'].notnull(),].reset_index(drop=True)\n wind_df['secW'] = wind_df.apply(lambda x: int(x['sW']), axis=1)\n wind_df['wloc'] = wind_df.apply(lambda x: float(str(x.firstW) + '.' 
+ str(x.secW)), axis=1)\n wind_df2 = wind_df.drop(columns=['W', 'secW', 'sW', 'fW', 'firstW'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'wloc': 'W'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT U WIND THING\n wind_df['U'] = wind_df.apply(lambda x: (str(x.U)), axis=1)\n wind_df['U'] = wind_df.apply(lambda x: 0 if x.U == 'XX.X' else x.U, axis=1)\n wind_df['U'] = wind_df.apply(lambda x: '0.0' if x.U == '0' else x.U, axis = 1)\n\n wind_df['fU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstU'] = wind_df.apply(lambda x: int(x['fU']), axis=1)\n wind_df['sU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secU'] = wind_df.apply(lambda x: int(x['sU']), axis=1)\n wind_df['uloc'] = wind_df.apply(lambda x: float(str(x.firstU) + '.' + str(x.secU)), axis=1)\n wind_df2 = wind_df.drop(columns=['U', 'secU', 'sU', 'fU', 'firstU'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'uloc': 'U'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT V WIND THING\n wind_df['V'] = wind_df.apply(lambda x: (str(x.V)), axis=1)\n wind_df['V'] = wind_df.apply(lambda x: 0 if x.V == 'XX.X' else x.V, axis=1)\n wind_df['V'] = wind_df.apply(lambda x: '0.0' if x.V == '0' else x.V, axis = 1)\n\n wind_df['fV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstV'] = wind_df.apply(lambda x: int(x['fV']), axis=1)\n wind_df['sV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secV'] = wind_df.apply(lambda x: int(x['sV']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstV) + '.' + str(x.secV)), axis=1)\n wind_df2 = wind_df.drop(columns=['V', 'secV', 'sV', 'fV', 'firstV'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'V'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n\n wind_df['adj_v'] = wind_df.apply(lambda row: -row['horz_length'] * np.cos(row['adj_theta']), axis=1)\n wind_df['adj_u'] = wind_df.apply(lambda row: row['horz_length'] * np.sin(row['adj_theta']), axis=1)\n\n ## GO THROUGH WIND\n window_size = 30\n u_series = pd.Series(wind_df['adj_u'])\n u_windows = u_series.rolling(window_size)\n u_averages = pd.DataFrame(u_windows.mean())\n u_averages.columns = ['U_avg']\n u_averages['key'] = u_averages.index\n\n v_series = pd.Series(wind_df['adj_v'])\n v_windows = v_series.rolling(window_size)\n v_averages = pd.DataFrame(v_windows.mean())\n v_averages.columns = ['V_avg']\n v_averages['key'] = v_averages.index\n\n w_series = pd.Series(wind_df['W'])\n w_windows = w_series.rolling(window_size)\n w_averages = pd.DataFrame(w_windows.mean())\n w_averages.columns = ['W_avg']\n w_averages['key'] = w_averages.index\n\n vw_df = w_averages.set_index('key').join(v_averages.set_index('key'))\n vw_df['key'] = vw_df.index\n uvw_df = vw_df.set_index('key').join(u_averages.set_index('key'))\n uvw_df['key'] = uvw_df.index\n wind_df2 = wind_df.copy()\n 
wind_df2['key'] = wind_df2.index\n wind_df = uvw_df.set_index('key').join(wind_df2.set_index('key'))\n\n wind_df['r_avg'] = wind_df.apply(lambda row: np.sqrt(row['U_avg'] ** 2 + row['V_avg'] ** 2), axis=1)\n wind_df['theta_avg'] = wind_df.apply(lambda row: 0 if row.V_avg == 0 else np.arctan(-row['U_avg'] / row['V_avg']), axis=1)\n # wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df3 = wind_df[wind_df.CH4.notnull()].drop(columns=\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG',\n 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4', 'R'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3['R'] = wind_df3.loc[:, 'shift_R']\n wind_df3 = wind_df3.drop(['shift_CH4', 'shift_R'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df4 = wind_df3.loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind',\n 'phi', 'raw_CH4', 'raw_R', 'U_avg', 'V_avg', 'W_avg', 'r_avg', 'theta_avg', 'distance', 'odometer']]\n\n # wind_df7 = add_odometer(wind_df4,'LAT','LONG')\n\n # wind_df4 = wind_df7.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n wind_df4 = wind_df5.copy()\n wind_df4 = wind_df5.copy()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def process_raw_data_what(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import pandas as pd\n from datetime import datetime\n import os,gzip,csv,sys\n from numpy import pi\n import numpy as np\n radians = False\n\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename, 'r')\n else:\n f = open(xDir + \"/\" + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process\n # if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n # xdat = str(xFilename[len(xCar)+1:len(xCar) + 9])\n\n # fnOut = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") 
+ \"_dat.csv\" #set CSV output for raw data\n # fnLog = xOutDir + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_log.csv\" #output for logfile\n\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(1).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]), int(dtime[14:16]),\n int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n # fOut = open(fnOut, 'w')\n # fOut.write(sOutHeader)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # epoch = dateob.strftime('%s.%f')\n # dtime = int(dateob.strftime('%Y%m%d%H%M%S'))\n\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #seconds = fdate.strftime('%s.%f')\n seconds = float(dt_to_epoch(fdate)) * 1e-3\n\n if 1 == 2: #sys.platform.startswith('win'):\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n int(pd.to_numeric(dateob.strftime('%S.%f')))) + ',' + str(\n pd.to_numeric(dateob.strftime('%f')) * 1000) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(',') + str(\n lstS[14])\n if 1==1:\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n str(seconds)[:10]) + ',' + str(int(pd.to_numeric(str(seconds)[11:]) * 1000)) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(',') + str(\n lstS[14])\n if float(seconds) >= (float(firsttime) + (60 * float(initialTimeBack))):\n fOut.write(csvWrite)\n del (seconds)\n del (csvWrite)\n\n xCntObs += 1\n\n # sOut = str(gZIP) + \",\" + str(f) + \",\" + str(xCntObs) + \",\" + str(xCntGoodValues) + \"\\n\"\n # fLog.write(sOut)\n infOut.write(str(xFilename) + '\\n')\n\n fOut.close()\n 
fLog.close()\n infOut.close()\n\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda row: calc_velocity(row['timediff'], row['distance']), axis=1)\n wind_df['U_cor'] = wind_df.apply(lambda row: row['U'] + row['VELOCITY'], axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3 = wind_df3.drop(['shift_CH4'], axis=1).loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind', 'phi', 'raw_CH4',\n 'distance']]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df4 = wind_df3.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df4 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :].copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n wind_df4 = wind_df5.copy()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def load(self):\n #print self.fileInfo.name\n progress = self.progress\n filePath = 
os.path.join(self.fileInfo.dir,self.fileInfo.name)\n self.fileSize = os.path.getsize(filePath)\n #--Localize\n cells = self.cells\n records = self.records\n canSave = self.canSave\n skipObjRecords = self.skipObjRecords\n contTypes = set(['CREC','CNTC','NPCC'])\n levTypes = set(('LEVC','LEVI'))\n debrisIds = self.debrisIds\n debrisTypes = set(debrisIds.keys())\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n if not canSave: del self.tes3.others[:]\n #--Progress info\n progress = self.progress\n progress(0.0,'Loading '+self.fileInfo.name)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #print \"%s [%d]\" % (name,size)\n #--CELL?\n if name == 'CELL':\n record = Cell(name,size,delFlag,recFlag,ins,0,skipObjRecords)\n cells.append(record)\n if canSave: records.append(record)\n #--Contents\n elif canSave and name in contTypes:\n if name == 'CREC':\n record = Crec(name,size,delFlag,recFlag,ins,True)\n elif name == 'CNTC':\n record = Cntc(name,size,delFlag,recFlag,ins,True)\n else:\n record = Npcc(name,size,delFlag,recFlag,ins,True)\n self.conts.append(record)\n self.conts_id[record.getId()] = record\n records.append(record)\n #--File Map\n elif name == 'FMAP':\n record = Fmap(name,size,delFlag,recFlag,ins)\n self.fmap = record\n records.append(record)\n #--Landscapes\n elif name == 'LAND':\n record = Land(name,size,delFlag,recFlag,ins)\n self.lands[record.getId()] = record\n records.append(record)\n #--Scripts\n elif canSave and name == 'SCPT':\n record = Scpt(name,size,delFlag,recFlag,ins,True)\n records.append(record)\n if record.getRef():\n self.refs_scpt[record] = record.getRef()\n #--Save debris info?\n elif name in debrisTypes:\n record = Record(name,size,delFlag,recFlag,ins)\n id = record.getId()\n if id:\n debrisIds[name].append(id.lower())\n if canSave:\n records.append(record)\n #--Skip Non-cell?\n elif not canSave:\n ins.seek(size,1,name)\n #--Keep non-cell?\n else:\n records.append(Record(name,size,delFlag,recFlag,ins))\n #--Done Reading\n ins.close()\n #--Analyze Cells\n cntCells = 0\n progress.setMax(len(self.cells))\n for cell in self.cells:\n cell.load(None,1)\n self.cells_id[cell.getId()] = cell\n if not canSave:\n cell.data = None #--Free some memory\n #--Progress\n cntCells += 1\n progress(cntCells)\n #--Scripts\n if self.refs_scpt:\n self.updateScptRefs()", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n 
elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def __init__(self, path: str):\n self._path = path\n self._fp = gzip.open(self._path, mode=\"r\")", "def read_gz(images,labels):\n\t# Open the images with gzip in read binary mode\n\t# images = gzip.open('../MNIST-data/train-images-idx3-ubyte.gz', 'rb')\n\t# labels = gzip.open('../MNIST-data/train-labels-idx1-ubyte.gz', 'rb')\n\n\t# Read the binary data\n\n\t# We have to get big endian unsigned int. So we need '>I'\n\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]#28\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]#28\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0] #60000\n\t# print(number_of_images);\n\n\tif number_of_images != N:\n\t raise Exception('number of labels did not match the number of images')\n\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array #60000X28X28\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t if i % 1000 == 0:\n\t print(\"i: %i\" % i)\n\t for row in range(rows):\n\t for col in range(cols):\n\t tmp_pixel = images.read(1) # Just a single byte\n\t tmp_pixel = unpack('>B', tmp_pixel)[0]\n\t x[i][row][col] = tmp_pixel\n\t tmp_label = labels.read(1)\n\t y[i] = unpack('>B', tmp_label)[0]\n\t # print(y.shape)#60000X1\n\treturn (x, y)", "def _load(self):\n # Extract the ASCII header (5 first lines)\n with open(self._xst_bin, 'rb') as f:\n header = list(islice(f, 0, 5))\n assert header[0] == b'HeaderStart\\n',\\\n 'Wrong header start'\n assert header[-1] == b'HeaderStop\\n',\\\n 'Wrong header stop'\n header = [s.decode('utf-8') for s in header]\n hd_size = sum([len(s) for s in header])\n\n # Parse informations into a metadata dictionnary\n keys = ['freq', 'ma', 'accu']\n search = ['Freq.List', 'Mr.List', 'accumulation']\n types = ['float64', 'int', 'int']\n for key, word, typ in zip(keys, search, types):\n for h in header:\n if word in h:\n self.meta[key] = np.array(\n h.split('=')[1].split(','),\n dtype=typ\n )\n\n # Deduce the dtype for decoding\n n_ma = self.meta['ma'].size\n n_sb = self.meta['freq'].size\n dtype = np.dtype(\n [('jd', 'float64'),\n ('data', 'complex64', (n_sb, n_ma*n_ma*2 + n_ma))]\n )\n\n # Decoding the binary file\n tmp = np.memmap(\n filename=self._xst_bin,\n dtype='int8',\n mode='r',\n offset=hd_size\n )\n decoded = tmp.view(dtype)\n\n self.data = decoded['data'] / self.meta['accu']\n self.time = Time(decoded['jd'], format='jd', precision=0)\n\n return", "def __init__(self, input_stream, level=9):\n super(Gzip, self).__init__(input_stream)\n\n self._level = level", "def load_data(path, rng, epoch, batch_size, x_,y_):\n #global x_,t_,y_,\n #global first_report2 \n #first_report2 = True\n start_time = time()\n v,p,skeleton_feature,l = load_gzip(path)\n v = v[:,:,:res_shape[2]]\n res_shape[0] = v.shape[0]\n v_new = empty(res_shape,dtype=\"uint8\")\n\n for i in xrange(v.shape[0]): #batch\n if p[i] < 10: p[i] = 100\n ofs = p[i]*ratio\n mid = v.shape[-1]/2.\n sli = None\n if ofs < mid:\n start = int(round(mid-ofs))\n end = int(round(mid+ofs))\n 
sli = slice(start,end)\n\n for j in xrange(v.shape[2]): #maps\n for k in xrange(v.shape[3]): #frames\n #body\n img = v[i,0,j,k]\n img = cut_img(img,5)\n img = misc.imresize(img,(h,h))\n # if j==0: img = 255-misc.imfilter(img,\"contour\")\n v_new[i,0,j,k] = img\n\n #hand\n img = v[i,1,j,k]\n img = img[sli,sli]\n img = misc.imresize(img,(h,h))\n v_new[i,1,j,k] = img\n\n vid, lbl = v_new,l\n\n #if epoch==0: print \"get in\",str(time()-start_time)[:3]+\"s\",\n # shuffle data\n ind = rng.permutation(l.shape[0])\n ind = ind[:batch_size]\n vid = vid[:,:,:,:4,:,:]\n vid, skeleton_feature, lbl = vid[ind].astype(floatX), skeleton_feature[ind].astype(floatX),lbl[ind].astype(floatX)\n #vid, skeleton_feature, lbl = vid.astype(floatX), skeleton_feature.astype(floatX),lbl.astype(floatX)\n\n # vid = vid/(255./(scaler*2.))-scaler\n #traj = traj/(255./(scaler_traj*2.))-scaler_traj\n # traj = traj/(255./5.)\n\n # Wudi already made labels start from 0\n #lbl -= 1 \n\n #if first_report2:\n # print \"data range:\",vid.min(),vid.max()\n # print \"traj range:\",skeleton_feature.min(),skeleton_feature.max()\n # print \"lbl range:\",lbl.min(),lbl.max()\n # first_report2 = False\n\n # set value\n x_.set_value(vid, borrow=True)\n #t_.set_value(skeleton_feature, borrow=True)\n y_.set_value(lbl, borrow=True)", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def read_texture_file(filename):\n \n # Deal with compressed files.\n import os\n if (os.path.splitext(filename)[1] == '.gz'):\n import gzip\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'r')\n\n # Stuff everything into a dict and a list\n # for now. 
Sort this out later (we will probably \n # want to have objects at some point\n header_data = {}\n particles = []\n\n header_lines = 5\n particle_header_lines = 9\n \n for line in f:\n if header_lines == 5:\n header_data['theia_lun'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 4:\n header_data['npartsallo'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 3:\n header_data['npartsused'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 2:\n header_data['n_expected_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 1:\n header_data['nseen_particles'] = int(line)\n header_lines = header_lines - 1\n elif header_lines == 0:\n if particle_header_lines == 9:\n this_particle = {}\n this_particle['process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 8:\n this_particle['particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 7:\n this_particle['old_particle_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 6:\n this_particle['old_process_id'] = int(line)\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 5:\n this_particle['particle_class'] = line.strip()\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 4:\n this_particle['particle_position'] = np.array(\n [line[0:12], line[12:24], line[24:36]])\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 3:\n this_particle['idata_count'] = int(line)\n if this_particle['idata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particle_header_lines = particle_header_lines - 2\n elif particle_header_lines == 2:\n this_particle['particle_idata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+12] for i in xrange(0, len(line.rstrip('\\r\\n')), 12)]\n )\n particle_header_lines = particle_header_lines - 1\n elif particle_header_lines == 1:\n this_particle['rdata_count'] = int(line)\n if this_particle['rdata_count'] > 0:\n particle_header_lines = particle_header_lines - 1\n else:\n particles.append(this_particle)\n particle_header_lines = 9\n elif particle_header_lines == 0:\n this_particle['particle_rdata'] = np.array(\n [line.rstrip('\\r\\n')[i:i+14] for i in xrange(0, len(line.rstrip('\\r\\n')), 14)]\n )\n particles.append(this_particle)\n particle_header_lines = 9\n f.close()\n\n return header_data, particles", "def get_inf_sizes(song_or_key):\n if isinstance(song_or_key, basestring):\n k = song_or_key\n else:\n k = song_key(song_or_key)\n path = os.path.join(LYRICS_DIR, k+'.txt.gz.infgen')\n with open(path) as f:\n return parse_infgen.parse_ratio(f)", "def extract_data(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(28 * 28 * 10000 * 1)\n data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)\n data = (data - (255 / 2.0)) / 255\n data = data.reshape(10000, 28, 28, 1)\n return data", "def LoadMMMetaData(filename):\r\n## print \"loading MM Metadata\"\r\n file = open(filename,'r')\r\n data = file.read()\r\n file.close()\r\n data = data.replace(\"false\",\"False\")\r\n data = data.replace(\"true\",\"True\")\r\n data = data.replace(\"null\",\"0\")\r\n f = eval(str(data))\r\n tiles = []\r\n for i in f.keys():\r\n if i != \"Summary\":\r\n tiles.append(i)\r\n xpos = f[tiles[0]][\"XPositionUm\"]\r\n ypos = 
f[tiles[0]][\"YPositionUm\"]\r\n zpos = f[tiles[0]][\"ZPositionUm\"] \r\n ScaleFactorX= f[\"Summary\"][\"PixelSize_um\"]\r\n ScaleFactorY= ScaleFactorX\r\n Width=f[\"Summary\"][\"Width\"]\r\n Height=f[\"Summary\"][\"Height\"]\r\n extent=[xpos-(Width/2)*ScaleFactorX,xpos+(Width/2)*ScaleFactorX,\\\r\n ypos-(Height/2)*ScaleFactorY,ypos+(Height/2)*ScaleFactorY] #FOR NOW\r\n\r\n #WHY WAS IT + THEN - FOR Y??\r\n return extent,zpos", "def _load(self):\n\n # number of non-data header details at top of data file\n header = 1\n\n # open file\n weatherData = []\n with open(self.wfile) as myfile:\n if (self.lines > 0):\n weatherData = [next(myfile) for x in xrange(self.lines + header)]\n else:\n weatherData = myfile.readlines()\n\n # get data stream from first line\n streamHeader = weatherData.pop(0).rstrip()\n if (streamHeader == 'FULL'):\n self.dataStream = 0\n elif (streamHeader == 'ADVANCED'):\n self.dataStream = 1\n elif (streamHeader == 'BASIC'):\n self.dataStream = 2\n else:\n print \"Error: unecognised data stream from file %s\" % (self.wfile)\n return -1\n\n # read data\n inputData = []\n for line in weatherData:\n entries = line.split()\n inputData.append(entries)\n\n # copy all into np array\n self.data = np.array(inputData)\n\n return 0", "def _read_member_header(self):\n header = _read_gzip_header(self._fileobj)\n offset = self._fileobj.tell()\n if \"RA\" not in header[\"extra_field\"]:\n try:\n if self._fileobj.seekable():\n self.stream.seek(0)\n except AttributeError:\n pass\n raise IOError(\"Not an idzip file: %r\" % self.name)\n\n dictzip_field = _parse_dictzip_field(header[\"extra_field\"][\"RA\"])\n num_member_chunks = len(dictzip_field[\"zlengths\"])\n\n start_chunk_index = len(self._chunks)\n for zlen in dictzip_field[\"zlengths\"]:\n self._chunks.append((offset, zlen))\n offset += zlen\n self._last_zstream_end = offset\n\n chlen = dictzip_field[\"chlen\"]\n sure_size = chlen * (num_member_chunks - 1)\n self._add_member(chlen, start_chunk_index, sure_size)", "def test_gzip_file_no_extension(self):\n # Write the data to a file\n temp_file = tempfile.NamedTemporaryFile()\n with gzip.open(temp_file.name, 'wb') as out:\n for item in self.data:\n serialzed = json.dumps(item).encode()\n out.write(serialzed + b'\\n')\n\n # Load from file, ensure it is correct\n actual_data = []\n with JsonlReader(temp_file.name) as f:\n for item in f:\n actual_data.append(item)\n self.assertEqual(self.data, actual_data)", "def _read_data(self):", "def process_raw_data_eng(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack,\n shift, maxSpeed='45', minSpeed='2'):\n import pandas as pd\n from datetime import datetime\n import os\n import gzip\n import sys\n from math import floor\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the (.txt) data with specific headers --> need to change this\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge 
(V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n # sHeader = \"Time Stamp,Inlet Number,P (mbars),T0 (degC),T5 (degC), Laser PID Readout,Det PID Readout,win0Fit0,win0Fit1,win0Fit3,win1Fit4,win0Fit5,win0Fit6,win0Fit7,win0Fit8,win0Fit9,win1Fit0,win1Fit1,win1Fit2,win1Fit3,win1Fit4,win1Fit5,win1Fit6,Det Bkgd,Ramp Ampl,CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Battery T (degC),FET T (degC),GPS Time,Latitude,Longitude\"\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T0 (degC),T5 (degC),Laser PID Readout,Det PID Readout,win0Fit0,win0Fit1,win0Fit2,win0Fit3,win0Fit4,win0Fit5,win0Fit6,win0Fit7,win0Fit8,win0Fit9,win1Fit0,win1Fit1,win1Fit2,win1Fit3,win1Fit4,win1Fit5,win1Fit6,Det Bkgd,Ramp Ampl,CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Battery T (degC),FET T (degC),GPS Time,Latitude,Longitude\"\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n\n headerNames = sHeader.split(',')\n GPS_loc = 37 # Where the GPS data is located (in the row)\n\n infoHeader = \"FILENAME\\n\"\n\n # gZIP is indicating if it is a ZIP file (I don't think I've written this in)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,'r')\n else:\n #f = open(xDir + \"/\" + xFilename, 'r')\n f = open(xDir + xFilename, 'r')\n\n ### FIGURING OUT DATE FROM FILENAME (WILL NEED TO CHANGE THIS IF DIFFERENT FILENAME)\n xdat = str('20') + xFilename[11:17]\n\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n\n # FINDING THE FIRST TIME NOTED\n firsttime = int(float(open(xDir + xFilename).readlines().pop(1).split(',')[37][:-4]))\n\n ## MAKING TEMPORARY FILE (FOR IF LATER YOU HAVE TO ADD A DATE)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n # 3fOut = open(fnOutTemp, 'w')\n # fOut.write(sOutHeader)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # READ IN THE LINES\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n\n if bGood:\n lstS = row.split(',')\n gpstime = lstS[GPS_loc]\n dtime = lstS[0]\n dt = lstS[1]\n time_dt = lstS[2]\n epoch = lstS[3]\n # nano = lstS[4]\n\n gps_time = lstS[37]\n dateob = datetime.fromtimestamp(int(gps_time[:-4]))\n nano = gps_time[-4:]\n\n # dateob = datetime(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]),int(time_dt[0:2]),int(time_dt[3:5]),int(time_dt[6:8]),int(float(nano)*1e-9))\n\n dtime = int(dateob.strftime('%Y%m%d%H%M%S'))\n # Date = dateob.strftime('%m%/%d/%Y')\n Date = dateob.strftime('%Y-%m-%d')\n\n GPS_Time = dateob.strftime('%H%:%M:%S')\n seconds = floor(float(gpstime))\n nano = dateob.strftime('%f')\n\n # dateob = datetime(int(dtime[6:10]),int(dtime[0:2]),int(dtime[3:5]),int(dtime[11:13]),int(dtime[14:16]),int(dtime[17:19]),int(float(dtime[19:23])*1000000))\n # epoch = dateob.strftime('%s.%f')\n\n # THIS IS USING THE CSU METHOD. 
IN OUR METHOD, WE DO THE SPEED LATER IN THE ALGORITHM.\n\n # # if RSSI of bottome sensor is below 50 if float(lstS[28]) < xMinRSSI: fLog.write(\"RSSI (Bottom)\n # value less than 50: \"+ str(lstS[28]) + \"\\n\") continue # Car Speed if float(lstS[12]) >\n # xMaxCarSpeed: fLog.write(\"Car speed of \" + str(float(lstS[12])) + \" exceeds max threshold of: \" +\n # str(xMaxCarSpeed) + \"\\n\") continue if float(lstS[12]) < xMinCarSpeed: fLog.write(\"Car speed of \" +\n # str(float(lstS[12])) + \" less than min threshold of: \" + str(xMinCarSpeed) + \"\\n\") continue\n\n # For some reason it is producing its longitude in positive number while USA is located at negative longitude\n # thats why we do -1 * float(lstS[7])\n\n # fix this when we have stuffs\n\n # s1 = str(lstS[1])+\",\"+str(lstS[2])+\",\"+str(lstS[3])+\",\"+str(lstS[4])+\",\"+str(lstS[6])+\",\"\n # s1 += str(-1 * float(lstS[7]))+\",\"+str(lstS[12])+\",\"+str(lstS[14])+\",\"+str(lstS[15])+\",\"+str(lstS[16])+\",\"+str(lstS[25])+\",\"\n # s1 += str(lstS[28])+\",\"+str(lstS[38])+\",\"+str(lstS[41])+\"\\n\"\n\n ## choosing what to write in the .csv\n\n # if sys.platform.startswith('win'):\n # ## DATE, TIME, SECONDS,NANOSECONDS\n # csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str(\n # float(pd.to_numeric(dateob.strftime('%S.%f')))) + ',' + str(\n # pd.to_numeric(dateob.strftime('%f')) * 1000) + str(',')\n # ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n # csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n # lstS[26]) + ',' + str('0') + ',' + str(lstS[26]) + ','\n # ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n # csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n # lstS[26]) + ',' + str(lstS[27]) + ',' + str(lstS[28]) + ','\n # # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n # csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(\n # lstS[32]) + ',' + str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(\n # lstS[39])\n\n # =============================================================================\n # if not sys.platform.startswith('win'):\n # ## DATE, TIME, SECONDS,NANOSECONDS\n # csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(dateob.strftime('%H:%M:%S')) + ',' + str((int(floor(pd.to_numeric(dateob.strftime('%s.%f')))))) + ',' + str((pd.to_numeric(dateob.strftime('%f')) *1000)) + str(',')\n # ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n # csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(lstS[26]) + ',' + str('0') + ','+ str(lstS[26]) + ','\n # ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n # csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(lstS[26]) + ',' + str(lstS[27]) +',' + str(lstS[28]) + ','\n # # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n # csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(lstS[32]) + ','+ str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(lstS[39][:-1]) + str('\\n')\n # #fOut.write('\\n')\n # fOut.write(csvWrite)\n # #fOut.write('\\n')\n #\n # =============================================================================\n # if not sys.platform.startswith('win'):\n if 1 == 1:\n ## DATE, TIME, SECONDS,NANOSECONDS\n csvWrite = str(Date) + ',' + str(GPS_Time) + ',' + str(seconds) + ',' + str(nano) + str(',')\n ## VELOCITY, U,V,W,BCH4,BRSSI,TCH4\n csvWrite += str('50') + ',' + str('0') + 
',' + str('0') + ',' + str('0') + ',' + str(\n lstS[26]) + ',' + str('0') + ',' + str(lstS[26]) + ','\n ## TRSSI, PRESS_MBAR, INLET, TEMPC, CH4, H20,C2H6\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(lstS[3]) + ',' + str(\n lstS[26]) + ',' + str(lstS[27]) + ',' + str(lstS[28]) + ','\n # R, C2C1, BATTV, POWMV,CURRMA, SOCPER,LAT,LONG\n csvWrite += str(lstS[29]) + ',' + str(lstS[30]) + ',' + str(lstS[31]) + ',' + str(\n lstS[32]) + ',' + str(lstS[33]) + ',' + str(lstS[34]) + ',' + str(lstS[38]) + str(',') + str(\n lstS[39])\n # fOut.write('\\n')\n\n #### REMOVING THE FIRST BIT OF DATA (if you need to )\n if seconds >= (firsttime + (60 * float(initialTimeBack))):\n fOut.write(csvWrite)\n\n del (csvWrite)\n # xCntGoodValues += 1\n\n xCntObs += 1\n\n # sOut = str(gZIP) + \",\" + str(f) + \",\" + str(xCntObs) + \",\" + str(xCntGoodValues) + \"\\n\"\n # fLog.write(sOut)\n\n infOut.write(str(xFilename) + '\\n')\n\n fOut.close()\n fLog.close()\n infOut.close()\n\n # xDate = dateob.strftime(\"%Y%m%d\")\n\n # newfnOut = xOutDir + xCar + \"_\" + xDate + \"_dat.csv\" #set CSV output for raw data\n # newfnLog = xOutDir + xCar + \"_\" + xDate + \"_log.csv\"\n\n # print(xCar + \"\\t\" + xdat + \"\\t\" + fnOut[-22:] + \"\\t\" + str(xCntObs) + \"\\t\" + str(xCntGoodValues) + \"\\t\" + str(\n # gZIP))\n\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n\n import numpy as np\n radians = False\n wind_df = pd.read_csv(fnOutTemp)\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'], axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda row: calc_velocity(row['timediff'], row['distance']), axis=1)\n wind_df['U_cor'] = wind_df.apply(lambda row: row['U'] + row['VELOCITY'], axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df3 = 
wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'distance', 'timediff', 'uncor_theta', 'CH4'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3 = wind_df3.drop(['shift_CH4'], axis=1).loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind', 'phi', 'raw_CH4']]\n wind_df4 = add_odometer(wind_df3.loc[wind_df3.totalWind.notnull(), :], 'LAT', 'LONG')\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df4 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :].copy().drop_duplicates()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def readSrc_bySens(self):\n dctn = self.srcData\n dctn['header'] = []\n # dctn['header'] = ['%% This dictionary created by alog_manip.alogrd_dict']\n for msg in self.srcFile: # broken by lines, are now strings\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n dctn['header'].append(msg) # assume all comments occur at beginning of file\n else:\n msg = msg.split()\n if msg[2] not in dctn: # none from this gSource yet\n dctn[msg[2]] = {}\n if msg[1] not in dctn[msg[2]]: # none in this gSource from this zMeas yet\n dctn[msg[2]][msg[1]] = {}\n try:\n dctn[msg[2]][msg[1]][float(msg[0])] = float(msg[3]) # double\n except ValueError: # it's a string\n # dimc = msg[3].split(']')[0].split('x')[1] # cols\n # dimr = msg[3].split(']')[0].split('x')[0][1:] # rows\n value_s = msg[3].split(']')[1][1:-1].split(',')\n dctn[msg[2]][msg[1]][float(msg[0])] = [float(i) for i in value_s]\n except IndexError: # it's blank\n dctn[msg[2]][msg[1]][float(msg[0])] = None # nan better?", "def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def process_download_other_old(self, data, meta_file_name):\n block_size = 1024\n # content-length in bytes\n self.data_len = float(data.info().get('Content-length', None))\n config_pytomo.LOG.debug('Content-length: %s' % self.data_len)\n #meta_file = open(meta_file_name, 'ab')\n #meta_file = open(meta_file_name, 'ab+')\n tries = 0\n accumulated_playback = 0\n buff_state_tracker = False\n accumulated_buffer = 0.0\n initial_data = 0\n initial_rate = 0\n byte_counter = 0\n 
self.state = INITIAL_BUFFERING_STATE\n start = time.time()\n while True:\n # Download and write\n before = time.time()\n if not ((before - start) > self.download_time):\n # read in bytes\n data_block = data.read(block_size)\n else:\n break\n if (not self.encoding_rate\n and tries <= config_pytomo.MAX_NB_TRIES_ENCODING):\n self.compute_encoding_rate(meta_file_name)\n tries += 1\n data_block_len = len(data_block)\n if data_block_len == 0:\n break\n after = time.time()\n self.compute_interruptions(data_block_len, after)\n if self.state == PLAYING_STATE:\n accumulated_playback += (after - before)\n if not buff_state_tracker:\n initial_duration = accumulated_buffer\n try:\n initial_rate = (initial_data * 8 / initial_duration /\n 1000)\n except ZeroDivisionError:\n initial_rate = 0\n buff_state_tracker = True\n elif self.state == BUFFERING_STATE:\n accumulated_buffer += (after - before)\n if not buff_state_tracker:\n initial_data += data_block_len\n else:\n config_pytomo.LOG.error(\"Unexpected state case\")\n break\n byte_counter += data_block_len\n block_size = self.best_block_size(after - before, data_block_len)\n instant_thp = (8e-3 * data_block_len / (after - before)\n if (after - before) != 0 else None)\n self.max_instant_thp = max(self.max_instant_thp, instant_thp)\n if config_pytomo.LOG_LEVEL == config_pytomo.DEBUG:\n # Progress message\n progress_stats = {\n 'percent_str': self.calc_percent(self._total_bytes,\n self.data_len),\n 'data_len_str': self.format_bytes(self.data_len),\n 'eta_str': self.calc_eta(start, time.time(), self.data_len,\n self._total_bytes),\n 'speed_str': self.calc_speed(start, time.time(),\n self._total_bytes),\n # in order to avoid None convertion to float in\n # report_progress and still have information\n 'instant_thp': str(instant_thp),\n 'byte_counter': self._total_bytes,\n 'current_buffer': self.current_buffer,\n }\n self.report_progress(progress_stats)\n self.set_total_bytes(byte_counter)\n self.accumulated_playback = accumulated_playback\n self.accumulated_buffer = accumulated_buffer\n self.initial_data = initial_data\n self.initial_rate = initial_rate\n return after - start", "def load(datastream):", "def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices", "def _ReadMemberCompressedData(self, file_object):\n zlib_decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n compressed_data = file_object.read(self._BUFFER_SIZE)\n while compressed_data:\n data, compressed_data = self._ReadCompressedData(\n zlib_decompressor, compressed_data)\n if compressed_data:\n file_object.seek(-len(compressed_data), os.SEEK_CUR)\n\n if not data:\n break\n\n compressed_data = file_object.read(self._BUFFER_SIZE)", "def extract_data(filename, 
num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size", "def debianize( strFilename ):\n \n #~ data = gzip.GzipFile( strFilename ).read();\n #~ print data;\n #~ return;\n \n #~ data = gzip.open( strFilename ).read();\n #~ print data;\n #~ return; \n \n #~ uncompressedData = bz2.BZ2File(strFilename).read()\n #~ print str(uncompressedData)\n #~ return;\n \n #~ file = open( strFilename, 'rb' );\n #~ data = file.read();\n #~ file.close();\n #~ print debug.dumpHexa( data );\n \n #~ ar = tarfile.open(strFilename, 'r:*')\n #~ for item in ar:\n #~ print( str(item) );\n #~ print( \"%s:\" % item.name );\n #~ #print debug.dumpHexa(item.buf);\n #~ #print zlib.decompress(item.buf)\n #~ #print zlib.decompress(ar.extractfile(item).read())\n #~ data = ar.extractfile(item.name).read()\n #~ print data # works !\n #~ ar.close() \n #~ return;\n \n fileLists = [];\n file = open( strFilename );\n data = file.read();\n file.close();\n \n print( \"data len: %d\" % len( data ) );\n\n nDataCompressedOffset = 0; # 132\n\n # works fine on toto.gz\n #~ f = gzip.open(strFilename, 'rb')\n #~ file_content = f.read()\n #~ print file_content\n #~ f.close() \n \n #~ decompressor = bz2.BZ2Decompressor();\n #~ uncompressed = decompressor.decompress(data[nDataCompressedOffset:]);\n \n #~ uncompressed = zlib.decompress(data[nDataCompressedOffset:]);\n \n uncompressed = decompress( data );\n print( \"uncompressed: %s\" % str( uncompressed ) );", "def read_zp(file):\n with open(file) as f_in:\n head = f_in.readline()\n units = f_in.readline()\n for line in f_in:\n try:\n zpWave[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[1])\n zpF0[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[2])\n \n except NameError:\n zpWave = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[1])}\n zpF0 = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[2])}\n \n return zpWave, zpF0", "def _load_bgzf_block(handle):\n\n # Pull in the BGZF block header information\n header, _ = _bgzf_metaheader(handle)\n XLEN = header[-4]\n BSIZE = struct.unpack('<H', handle.read(2))[0]\n\n # Expose the compressed data\n d_size = BSIZE - XLEN - 19\n d_obj = zlib.decompressobj(-15)\n data = d_obj.decompress(handle.read(d_size)) + d_obj.flush()\n\n # Checking data integrity\n CRC32, ISIZE = unpack_gzip_integrity(handle.read(_integrity_size))\n deflated_crc = zlib.crc32(data)\n if deflated_crc < 0:\n deflated_crc = deflated_crc % (1 << 32)\n if CRC32 != deflated_crc:\n raise ValueError('CRCs are not equal: is {}, not {}'.format(CRC32, deflated_crc))\n if ISIZE != len(data):\n raise ValueError('unequal uncompressed data size')\n\n return BSIZE + 1, 
data", "def __init__(self, fits_file, ext=0):", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def extractOldALFOSCHeader(file):\n\n try:\n\n hdulist = pyfits.open(file)\n hdulist.close() \n\n # Extract primary header unit\n ph = extractHDU(file,0)\n\n # Form a proper timestamp from a float type UT \n ut = requireValidFloat('UT',ph) \n hh = int(ut)\n mm = int((ut-hh)*60)\n ss = int((((ut-hh)*60)-mm)*60)\n timestamp = \"%02d:%02d:%02d\" % (hh,mm,ss)\n date_obs = requireValidString('DATE-OBS', ph)\n\n fitsheader = {\n 'imagetyp': ph.get('IMAGETYP', 'na').strip() or 'na',\n 'exptime'\t: requireValidFloat('EXPTIME',ph),\t\t\t\n 'azimuth'\t: '0.00', \t\n 'austatus': 'na',\t\n 'telfocus': requireValidInt('TELFOCUS', ph),\n 'gain'\t: '0.726',\n 'alfltid'\t: requireValidInt('FILTID', ph),\n 'alfltnm'\t: requireValidString('FILTER', ph),\t \t\n 'fafltid'\t: requireValidInt('AFILTID', ph),\n 'fafltnm'\t: requireValidString('AFILTER', ph),\n 'fbfltid'\t: requireValidInt('BFILTID', ph),\n 'fbfltnm'\t: requireValidString('BFILTER', ph),\t\t\n 'rotpos' : requireValidFloat('ROTPOS',ph),\n 'apertur' : requireValidString('APERTUR', ph),\n 'ra' : '%.2f' % requireValidFloat('RA',ph),\n 'decl' : '%.2f' % requireValidFloat('DEC',ph) \n\t\n }\n \n fitsheader['dateobs'] = \"%sT%s\" % (date_obs, timestamp)\n\n # Calculate telescope altitude from airmass\n airmass = requireValidFloat('AIRMASS',ph)\n fitsheader['telalt'] = '%.2f' % (90 - degrees(pi/2 - asin(1/airmass))) \n\n # Calculate pixel scale\n cd1_1 = requireValidInt('CDELT1', ph)\n fitsheader['pscale'] = str(cd1_1 * 0.19)\n\n fitsheader['instrume'] = 'alfosc'\n\n if (fitsheader['exptime'] > 1.0) and (requireValidString('GRISM', ph) == 'Open_(Lyot)'):\n fitsheader['imaging'] = 1\n else:\n fitsheader['imaging'] = 0\t\n\n fitsheader['keys'] = ['dateobs','telalt','azimuth','rotpos','ra','decl','telfocus','pscale','gain',\n\t\t'apertur','alfltid','alfltnm','fafltid','fafltnm','fbfltid','fbfltnm',\n\t\t'imagetyp','exptime','austatus']\n\n except HeaderException, e:\n return ['ERROR']\n\n\n return fitsheader", "def __init__(self, path, max_cache=50000): # 50kb\n self.spindle = 0\n self.cache = BytesIO()\n self.max_cache = max_cache\n self.f = open(path, 'ab+')", "def MTread(fn,slMode='s',leng=0,start=0, wav_out=None, outpath='Default Folder',header=None):\n #check 
variables\n try:\n fn\n except NameError:\n raise Warning('Filename fn needs to be defined!')\n \n try:\n slMode\n except NameError:\n warnings.warn('slMode - the start and length mode was not defined...defaulting to s for seconds')\n slMode = 's'\n if slMode.upper() not in ['S','P']:\n warnings.warn('slMode - the start and length mode has to be either s for seconds or p for points...defaulting to s for seconds')\n slMode = 's'\n \n try:\n leng\n except NameError:\n warnings.warn('leng - the length of the data to be read in was not defined...defaulting to leng = 0, reading in all data')\n leng = 0\n if type(leng) != int:\n warnings.warn('leng - the length of the data has to be an integer...defaulting to leng = 0, reading in all data')\n leng = 0\n \n try:\n start\n except NameError:\n warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n if type(leng) != int:\n warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n \n # Create empty dictionaries\n HEADER = {}\n INFO = {}\n \n if leng==0: leng = np.inf\n \n #check if auxiliary data\n vcode = path.basename(fn)[2]\n aux = True if vcode in ['I','J','K','P','T','X','Y','Z'] else False\n \n #open the binary file and start reading\n with open(fn, \"rb\") as f:\n magicstring = f.read(8).decode('ascii').strip().strip('\\x00')\n if magicstring == 'DATA':\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Found Data...')\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Header information...')\n HEADER['totalhdrs'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['abbrev '] = f.read(8).decode('ascii').strip().strip('\\x00')\n HEADER['stationcode'] = f.read(3).decode('ascii').strip().strip('\\x00')\n HEADER['title'] = f.read(82).decode('ascii').strip().strip('\\x00')\n HEADER['month'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['day'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['year'] = (f.read(5).decode('ascii').strip().strip('\\x00'))\n HEADER['hours'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['minutes'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['seconds'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['msec'] = (f.read(4).decode('ascii').strip().strip('\\x00'))\n HEADER['sampling_period'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['samplebits'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['wordsize'] = int(f.read(2).decode('ascii').strip().strip('\\x00'))\n \n #if HEADER['wordsize'] < HEADER['samplebits']/8:\n #warnings.warn('The samplebits field Does not fit the wordsize field. --- This file may be bad. 
')\n HEADER['typemark'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['swapping'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['signing'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['caltype'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['calmin'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calmax'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calunits'] = f.read(40).decode('ascii').strip().strip('\\x00')\n HEADER['recordsize'] = int(f.read(6).decode('ascii').strip().strip('\\x00'))\n HEADER['sourcevers'] = f.read(9).decode('ascii').strip().strip('\\x00')\n HEADER['sourcesn'] = f.read(16).decode('ascii').strip().strip('\\x00')\n print(HEADER)\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Meta data...')\n INFO['filename'] = fn\n INFO['filesize'] = path.getsize(fn)\n INFO['srate'] = 1/HEADER['sampling_period']\n INFO['when'] = datetime.strptime(HEADER['year'] + '/' + HEADER['month'] + '/' + HEADER['day'] + ' ' + HEADER['hours'] + ':' + HEADER['minutes'] + ':' + HEADER['seconds'] + '.' + HEADER['msec'],'%Y/%m/%d %H:%M:%S.%f')\n INFO['datenumber'] = date.toordinal(INFO['when'])\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Reading Data...')\n if slMode.upper() == 'P': # Start & Length specified in # Points (samples)\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start/INFO['srate'])\n INFO['datenumber'] = INFO['datenumber'] + (start/INFO['srate']/24/3600)\n else:\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start) # Corrected start time (with offset)\n INFO['datenumber'] = INFO['datenumber'] + start/24/3600\n \n if 'wordsize' in HEADER:\n if HEADER['wordsize'] == '':\n HEADER['wordsize'] = 2\n else:\n HEADER['wordsize'] = 2\n \n INFO['nsamp'] = int((INFO['filesize'] - 512 * HEADER['totalhdrs']) / HEADER['wordsize'])\n INFO['seconds'] = INFO['nsamp'] / INFO['srate']\n \n if leng > 0: # Only load data if it's been asked for.\n if any(x in HEADER['swapping'] for x in ['S','L','s','l']):\n mode = '<'\n else:\n mode = '>'\n \n status = 0\n if slMode.upper() == 'P': # specified start time in sample 'P'oints rather than time\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + int(start) * HEADER['wordsize']) # Skip by samples/points\n except:\n status = 1\n else:\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + round(start * INFO['srate'] * HEADER['wordsize'])) # skip by time (seconds)\n except:\n status = 1\n \n if status == 0: # If status is nonzero, we probably went past the end of the file.\n if HEADER['caltype'].upper() == 'F':\n if not any(x == HEADER['wordsize'] for x in [4,8]):\n f.close(f)\n #raise Warning('Invalid word size! 
Only valid Float sizes are four or eight bytes.')\n binType = 'float' + str(HEADER['wordsize'] * 8)\n else:\n binType = 'bit' + str(HEADER['wordsize'] * 8)\n if any(x in HEADER['signing'] for x in ['U','u']):\n binType = 'u' + binType\n \n \n if slMode.upper() == 'P':\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(leng)\n \n else:\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(int(leng*INFO['srate'])*2)\n if aux:\n fmt = '%c%iH' %(mode,len(fi)/2)\n else:\n fmt = '%c%ih' %(mode,len(fi)/2)\n p = unpack(fmt,fi)\n \n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n \n if (type(calmin) == float and type(calmax) == float and ((calmin + np.spacing(1)) < calmax) and HEADER['caltype'].upper() != 'F'):\n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n if HEADER['signing'].upper() == 'U':\n bitmin = 0\n bitmax = 2**HEADER['samplebits'] - 1\n else:\n bitmin = -(2**(HEADER['samplebits']-1))\n bitmax = (2**(HEADER['samplebits']-1)) - 1\n \n \n multiplier = (calmax - calmin) / (bitmax - bitmin)\n p = (np.array(p) - bitmin) * multiplier + calmin\n else:\n p = []# Output an empty matrix if requested data is beyond the length of the current file\n \n else:\n p = [] # Also output an empty matrix of zero length LENGTH input is requested (ie, only return header/info values)\n INFO['count'] = 0\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Returning data...')\n \n #check if it is a data or aux file\n \n if aux:\n p = pd.DataFrame({'Value':p})\n p['VarCode'] = vcode\n p['mission'] = HEADER['title'].split('-')[0] \n p['sampling_rate'] = HEADER['sampling_period']\n p['nSample'] = np.arange(1,p.shape[0]+1)\n p['start_time'] = pd.to_datetime(HEADER[\"year\"] + \"-\" + HEADER[\"month\"] + \"-\" + HEADER[\"day\"] + \" \" + HEADER[\"hours\"] + \":\" +\\\n HEADER[\"minutes\"] + \":\" + HEADER[\"seconds\"] + \".\" + HEADER[\"msec\"])\n p['sec_since_start'] = p['nSample'] * p['sampling_rate']\n p['Time'] = p['start_time'] + pd.to_timedelta(p['sec_since_start'], unit='s')\n return(p,HEADER,'aux')\n else:\n if wav_out != None:\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Saving wav file...' + HEADER['title'].split('-')[0] )\n if 'p':\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n outfn = outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.wav'\n sr = int(INFO['srate'])\n data = p\n write(outfn,int(sr), np.int16(data/(abs(data).max())*np.iinfo(np.int16).max))\n \n if header != None:\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n hh = pd.DataFrame.from_dict(HEADER, orient='index')\n hh.to_csv( outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.csv')\n if 'p':\n return p,HEADER,INFO", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def __init__(self, **kwargs):\n super(ImageExporter, self).__init__(**kwargs)\n # COMPRESS=PACKBITS\n # PIL: TIFF uncompressed, or Packbits, LZW, or JPEG compressed images. 
In the current version, PIL always writes uncompressed TIFF files\n # http://linfiniti.com/2011/05/gdal-efficiency-of-various-compression-algorithms/\n # predictor for 'DEFLATE' or 'LZW' : 1 or 2\n i_tiff_compression_predictor=2\n # zlevel for 'DEFLATE' : 1 to 9\n i_tiff_compression_zlevel=8\n self.jpg_quality=75\n self.tiff_compression=[]\n self._metadata = []\n if self.reader.metadata_input:\n self.metadata_input=self.reader.metadata_input\n self.tiff_compress = kwargs.get('tiff_compression', \"LZW\")\n self.tiff_compress =self.tiff_compress.upper()\n self.jpg_quality = kwargs.get('jpg_quality', self.jpg_quality)\n if self.jpg_quality < 1 or self.jpg_quality > 95:\n self.jpg_quality=75\n i_tiff_compression_predictor = kwargs.get('tiff_predictor', i_tiff_compression_predictor)\n if i_tiff_compression_predictor < 1 or i_tiff_compression_predictor > 2:\n i_tiff_compression_predictor=2\n i_tiff_compression_zlevel = kwargs.get('tiff_zlevel', i_tiff_compression_zlevel)\n if i_tiff_compression_zlevel < 1 or i_tiff_compression_zlevel > 9:\n i_tiff_compression_predictor=8\n if self.tiff_compress == \"PACKBITS\" :\n self.tiff_compression.append('COMPRESS=PACKBITS')\n elif self.tiff_compress == \"DEFLATE\":\n self.tiff_compression.append('COMPRESS=%s' % 'DEFLATE')\n self.tiff_compression.append('PREDICTOR=%d' % i_tiff_compression_predictor)\n self.tiff_compression.append('ZLEVEL=%d' % i_tiff_compression_zlevel)\n elif self.tiff_compress == \"LZW\":\n self.tiff_compression.append('COMPRESS=%s' % 'LZW')\n self.tiff_compression.append('PREDICTOR=%d' % i_tiff_compression_predictor)\n elif self.tiff_compress == \"NONE\":\n self.tiff_compression.append('COMPRESS=NONE')", "def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = 
f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] = f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')", "def read(self, src):\n self.read_mesh(src)\n self.read_data(src)", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. 
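# ---------------------------------------------------------------------------
# Minimal, self-contained illustration of the read/reshape/swapaxes pattern
# this image reader uses: interpret a raw byte buffer with an explicit byte
# order, reshape it to (planes, masses, height, width), then swap the first
# two axes and copy so each mass becomes one contiguous image stack. The
# sizes and byte order below are made up for the demonstration.
import numpy as np

planes, masses, height, width = 2, 3, 4, 5
dt = np.dtype('>u2')                       # big-endian, 2 bytes per pixel
raw = np.arange(planes * masses * height * width, dtype=dt).tobytes()

data = np.frombuffer(raw, dtype=dt).reshape(planes, masses, height, width)
view = data.swapaxes(0, 1)                 # per-mass ordering, but only a view
cube = view.copy()                         # full copy -> contiguous, faster access

assert cube.shape == (masses, planes, height, width)
assert cube.flags['C_CONTIGUOUS'] and not view.flags['C_CONTIGUOUS']
# ---------------------------------------------------------------------------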
Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def _read_dx(self, FN):\n if FN.endswith('.dx'):\n F = open(FN, 'r')\n else:\n import gzip\n F = gzip.open(FN, 'r')\n\n # Read the header\n line = F.readline()\n while line.find('object') == -1:\n line = F.readline()\n header = {}\n header['counts'] = [int(x) for x in line.split(' ')[-3:]]\n for name in ['origin', 'd0', 'd1', 'd2']:\n header[name] = [float(x) for x in F.readline().split(' ')[-3:]]\n F.readline()\n header['npts'] = int(F.readline().split(' ')[-3])\n\n # Test to make sure the grid type is okay.\n # These conditions are not absolultely essential,\n # but they reduce the number of subtraction operations.\n if not (header['d0'][1] == 0 and header['d0'][2] == 0\n and header['d1'][0] == 0 and header['d1'][2] == 0\n and header['d2'][0] == 0 and header['d2'][1] == 0):\n raise Exception('Trilinear grid must be in original basis')\n if not (header['d0'][0] > 0 and header['d1'][1] > 0\n and header['d2'][2] > 0):\n raise Exception('Trilinear grid must have positive coordinates')\n\n # Read the data\n vals = np.ndarray(shape=header['npts'], dtype=float)\n index = 0\n while index < header['npts']:\n line = F.readline()[:-1]\n items = [float(item) for item in line.split()]\n vals[index:index + len(items)] = items\n index = index + len(items)\n F.close()\n\n data = {\n 'origin':np.array(header['origin']), \\\n 'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \\\n 'counts':np.array(header['counts']), \\\n 'vals':vals}\n return data", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif (new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()", "def LoadZVIMetaData(filename):\r\n print \"Loading zvi file metadata...\"\r\n\r\n ole = OleFileIO_PL.OleFileIO(filename)\r\n #ole.dumpdirectory()\r\n metadata=ole.extract_metadata()\r\n (channeldict,Width,Height,MosaicSizeX,MosaicSizeY,ScaleFactorX,ScaleFactorY,\\\r\n channels,XPositions,YPositions,FocusPositions,XCoors,YCoors,ExposureTimes)=metadata\r\n Xpos=np.array(XPositions);\r\n Ypos=np.array(YPositions);\r\n\r\n extent=[Xpos.min()-(Width/2)*ScaleFactorX,Xpos.max()+(Width/2)*ScaleFactorX,\\\r\n Ypos.max()+(Height/2)*ScaleFactorY,Ypos.min()-(Height/2)*ScaleFactorY]\r\n \r\n return extent", "def _read_data(self, fh, byteorder='>'):\r\n fh.seek(len(self.header))\r\n data = fh.read()\r\n dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'\r\n depth = 1 if self.magicnum == b\"P7 332\" 
else self.depth\r\n shape = [-1, self.height, self.width, depth]\r\n size = numpy.prod(shape[1:])\r\n if self.magicnum in b\"P1P2P3\":\r\n data = numpy.array(data.split(None, size)[:size], dtype)\r\n data = data.reshape(shape)\r\n elif self.maxval == 1:\r\n shape[2] = int(math.ceil(self.width / 8))\r\n data = numpy.frombuffer(data, dtype).reshape(shape)\r\n data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]\r\n else:\r\n data = numpy.frombuffer(data, dtype)\r\n data = data[:size * (data.size // size)].reshape(shape)\r\n if data.shape[0] < 2:\r\n data = data.reshape(data.shape[1:])\r\n if data.shape[-1] < 2:\r\n data = data.reshape(data.shape[:-1])\r\n if self.magicnum == b\"P7 332\":\r\n rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)\r\n rgb332 *= [36, 36, 85]\r\n data = numpy.take(rgb332, data, axis=0)\r\n return data", "def parse(self, calibration_px=1.0):\n self.isParsingNeeded = False\n self.meta_data = {}\n self.data = []\n #CZI files\n if self.extend == '.czi':\n with czifile.CziFile(self.file_path) as czi:\n data = czi.asarray()\n Header_Metadata = str(czi).split('<ImageDocument>')\n string = '<ImageDocument>'+Header_Metadata[1]\n #print(string.strip(\"'\"))\n metadata = XMLET.fromstring(string.strip(\"'\"))\n try:\n #Query XML fore the metadata for picture shape(X;Y;Z-stacks).\n #Picture Shape.\n shapes = metadata.findall('./Metadata/Information/Image')[0]\n self.meta_data[\"ShapeSizeX\"] = int(shapes.findall('SizeX')[0].text)\n self.meta_data[\"ShapeSizeY\"] = int(shapes.findall('SizeY')[0].text)\n try:\n self.meta_data[\"ShapeSizeZ\"] = int(shapes.findall('SizeZ')[0].text)\n except:\n self.meta_data[\"ShapeSizeZ\"] = 1\n #Get the hyperstack dimension if the image is a hyperstack.\n try:\n self.meta_data[\"ShapeSizeC\"] = int(shapes.findall('SizeC')[0].text)\n except:\n self.meta_data[\"ShapeSizeC\"] = 1\n print(\"No info of color channels 1 assumed\")\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n PixelSizes = metadata.findall('./Metadata/Scaling/Items/Distance')\n self.meta_data['SizeX'] = float(PixelSizes[0].findall('Value')[0].text)*10**6\n self.meta_data['SizeY'] = float(PixelSizes[1].findall('Value')[0].text)*10**6\n self.meta_data['SizeZ'] = float(PixelSizes[2].findall('Value')[0].text)*10**6\n except(ValueError):\n print (\"Metadata fail\")\n\n #Tiff files.\n #Tiff files are problematic because they most likely wont contain the necessary metadata.\n #Try to get the shape info over common dimensions.\n elif self.extend == '.tif':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray()\n for shape in data.shape:\n if shape <5:\n self.meta_data[\"ShapeSizeC\"] = shape\n elif shape <40:\n self.meta_data[\"ShapeSizeZ\"] = shape\n else:\n self.meta_data[\"ShapeSizeY\"] = shape\n self.meta_data[\"ShapeSizeX\"] = shape\n\n #Read Lsm Files.\n elif self.extend == '.lsm':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray(memmap=True)\n headerMetadata = str(tif.pages[0].cz_lsm_scan_info)\n metadataList = headerMetadata.split(\"\\n*\")\n #Get image shape from lsm header SizeC=0 if not given.\n for shapes in metadataList:\n if \"images_height\" in shapes:\n self.meta_data[\"ShapeSizeX\"]= int(shapes.split()[-1])\n if \"images_width\" in shapes:\n self.meta_data[\"ShapeSizeY\"]= int(shapes.split()[-1])\n if \"images_number_planes\" in shapes:\n self.meta_data[\"ShapeSizeZ\"]= int(shapes.split()[-1])\n if \"images_number_channels\" in shapes:\n self.meta_data[\"ShapeSizeC\"]= int(shapes.split()[-1])\n #Get 
physical pixel size of image(nm/px) convert to(µm/px).\n data = np.swapaxes(data,1,2)\n lsm_header = str(tif.pages[0].tags.cz_lsm_info)\n LsmInfo = lsm_header.split(\", \")\n i = 0\n #Query for pixel size.\n for element in LsmInfo:\n if \"e-0\" in element:\n i += 1\n if i == 1:\n self.meta_data['SizeX'] = (float(element)*10**6)\n if i == 2:\n self.meta_data['SizeY'] = (float(element)*10**6)\n if i == 3:\n self.meta_data['SizeZ'] = (float(element)*10**6)\n\n elif self.extend == \".png\":\n data = misc.imread(self.file_path)\n data = np.expand_dims(np.expand_dims(data[...,0],0),0)\n self.meta_data[\"ShapeSizeC\"] = 1\n self.meta_data[\"ShapeSizeZ\"] = 1\n self.meta_data[\"ShapeSizeX\"] = data.shape[2]\n self.meta_data[\"ShapeSizeY\"] = data.shape[3]\n self.meta_data[\"SizeZ\"] = 1\n self.meta_data[\"SizeX\"] = 0.01\n self.meta_data[\"SizeY\"] = 0.01\n #Bring all formats in the same shape.\n self.data = np.reshape(data,(self.meta_data[\"ShapeSizeC\"],self.meta_data[\"ShapeSizeZ\"],self.meta_data[\"ShapeSizeX\"],self.meta_data[\"ShapeSizeY\"]))\n self.meta_data['ChannelNum'] = self.meta_data[\"ShapeSizeC\"]\n #Set pixel size to manuell value if there are no metadata.\n if self.meta_data == {}:\n self.set_calibration(calibration_px)\n #Set the box for manuel calibration to the actuell pixel size.", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
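# ---------------------------------------------------------------------------
# The triple loop that follows copies CPs[:, acc] into CPsTab[:, i, j, k] with
# acc advancing fastest over k, then j, then i -- which is equivalent to a
# plain C-order reshape of the (3, n1*n2*n3) array to (3, n1, n2, n3). A quick
# check of that equivalence on random data (sizes here are arbitrary):
import numpy as np

n1, n2, n3 = 4, 3, 2
cps = np.random.rand(3, n1 * n2 * n3)

looped = np.zeros((3, n1, n2, n3))
acc = -1
for i in range(n1):
    for j in range(n2):
        for k in range(n3):
            acc += 1
            looped[:, i, j, k] = cps[:, acc]

assert np.allclose(looped, cps.reshape(3, n1, n2, n3))
# ---------------------------------------------------------------------------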
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def read_data(self):\n if not self.header['data included']:\n pass\n elif self.header['file type'] in (21, 26):\n self._isotope_data()\n if os.path.exists(self.filename + '_txt'):\n self._isotope_txt_data()\n elif self.header['file type'] == 22:\n # line scan types, no ImageHeader\n warnings.warn('No data read for line scan, fix')\n pass\n elif self.header['file type'] in (31, 35):\n self._beamstability_data()\n else:\n self._image_data()", "def test_mcg_data_compression(\n self, mcg_obj, awscli_pod, bucket_factory, bucketclass_dict\n ):\n download_dir = \"/aws/compression/\"\n awscli_pod.exec_cmd_on_pod(\n command=craft_s3_command(\n f\"cp s3://{constants.TEST_FILES_BUCKET}/enwik8 {download_dir}\"\n ),\n out_yaml_format=False,\n )\n bucketname = bucket_factory(1, bucketclass=bucketclass_dict)[0].name\n full_object_path = f\"s3://{bucketname}\"\n sync_object_directory(awscli_pod, download_dir, full_object_path, mcg_obj)\n # For this test, enwik8 is used in conjunction with Snappy compression\n # utilized by NooBaa. Snappy consistently compresses 35MB of the file.\n mcg_obj.check_data_reduction(bucketname, 35 * 1024 * 1024)", "def _loadBinaryData_compressed(self, filename, with_axis=None): \n self.set_data_writable() \n _data = numpy.load(filename)[\"data\"]\n self.data = self._extract_data_with_axis(_data, with_axis)\n self.set_data_protected()", "def readFastaFile(filename):", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def try3():\n path = '/Users/mayankkejriwal/datasets/eswc2016/'\n total = 10\n count = 1\n with gzip.open(path+'freebase-rdf-latest.gz', 'rb') as f:\n for line in f:\n print 'line : ',\n print line\n if count > total:\n break\n count += 1", "def parse_header(self):\n self._get_decompressor()\n whs = jpeg.ffi.new(\"int[]\", 3)\n whs_base = int(jpeg.ffi.cast(\"size_t\", whs))\n whs_itemsize = int(jpeg.ffi.sizeof(\"int\"))\n n = self.lib_.tjDecompressHeader2(\n self.decompressor.handle_,\n jpeg.ffi.cast(\"unsigned char*\",\n self.source.__array_interface__[\"data\"][0]),\n self.source.nbytes,\n jpeg.ffi.cast(\"int*\", whs_base),\n jpeg.ffi.cast(\"int*\", whs_base + whs_itemsize),\n jpeg.ffi.cast(\"int*\", whs_base + whs_itemsize + whs_itemsize))\n if n:\n raise JPEGRuntimeError(\"tjDecompressHeader2() failed with error \"\n \"%d and error string %s\" %\n (n, self.get_last_error()), n)\n self.width = int(whs[0])\n self.height = int(whs[1])\n self.subsampling = int(whs[2])", "def _read_files(self) -> MMD:\n\t\theaders = []\n\t\tbodies = []\n\t\tif self.config.file_type == FileType.CSV:\n\t\t\tif self.config.source_uris.endswith('.zip'):\n\t\t\t\twith ZipFile(self.config.source_uris) as zf:\n\t\t\t\t\tfor item in zf.namelist():\n\t\t\t\t\t\tif item.endswith('.csv'):\n\t\t\t\t\t\t\t# with zf.open(item, 'r') as infile:\n\t\t\t\t\t\t\tcsv_reader = 
csv.reader(TextIOWrapper(zf.open(item, 'r'), 'utf-8'))\n\t\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\t\t# need to find a more efficient way, the csv reader is a generator that can only be used once\n\t\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\t\telif self.config.source_uris.endswith('.csv'):\n\t\t\t\tfor uri in self.config.source_uris:\n\t\t\t\t\tif uri.endswith('.csv'):\n\t\t\t\t\t\tcsv_reader = csv.reader(open(uri, newline='', encoding='utf-8'))\n\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\telif self.config.file_type == FileType.CNSCHEMA:\n\t\t\theader = ['@id', 'label_@language', 'label_@value']\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tload_dict = json.load(load_f)\n\t\t\t\theader.extend(load_dict['@context'].keys())\n\t\t\t\theader = [h for h in header if h not in ['label', 'range', 'domain', 'subClassOf']]\n\t\t\t\ttmp_h = [h for h in header if h not in ['@id', '@language', '@value']]\n\t\t\t\tfor item in load_dict['@graph']:\n\t\t\t\t\tif item['@id'].split('/')[-2] == 'resource':\n\t\t\t\t\t\trow = [item['@id'], item['label']['@language'], item['label']['@value']]\n\t\t\t\t\t\tfor h in tmp_h:\n\t\t\t\t\t\t\tif h in item:\n\t\t\t\t\t\t\t\trow.append(item[h])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\t\tbody.append(tuple(row))\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(body)\n\t\telif self.config.file_type == FileType.OPENBASE:\n\t\t\theader = []\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tfor line in load_f:\n\t\t\t\t\trow = []\n\t\t\t\t\tflat_line = flatten_json(json.loads(line))\n\t\t\t\t\tfor key in flat_line:\n\t\t\t\t\t\tif key not in header:\n\t\t\t\t\t\t\theader.append(key)\n\t\t\t\t\tfor h in header:\n\t\t\t\t\t\tif h in flat_line:\n\t\t\t\t\t\t\trow.append(flat_line[h])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\tbody.append(row)\n\t\t\tfor item in body:\n\t\t\t\tif len(item) < len(header):\n\t\t\t\t\titem.extend([None for i in range(len(header) - len(item))])\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(tuple([tuple(item) for item in body]))\n\t\telif self.config.file_type == FileType.OPENKS:\n\t\t\t# knowledge graph dataset loading \n\t\t\tif os.path.exists(self.config.source_uris + '/entities') and os.path.exists(self.config.source_uris + '/triples'):\n\t\t\t\theaders = [['entities'], ['triples']]\n\t\t\t\tfor file in ['entities', 'triples']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('\\t')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\t# general text dataset loading\n\t\t\telif os.path.exists(self.config.source_uris + '/train') and os.path.exists(self.config.source_uris + '/valid'):\n\t\t\t\theaders = [['train'], ['valid']]\n\t\t\t\tfor file in ['train', 'valid']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('@@')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\telse:\n\t\t\t\tlogger.warn('Only allows loading with entities and triples for now!')\n\t\t\t\traise IOError\n\t\telif self.config.file_type == FileType.NERO:\n\t\t\theaders = [['unlabeled_data'], ['predict'], ['pattern']]\n\t\t\tfor file in ['unlabeled_data', 'predict', 
'pattern']:\n\t\t\t\ttmp = []\n\t\t\t\twith open(self.config.source_uris + '/' + file + '.json', 'r') as load_f:\n\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\ttmp.append(line.strip())\n\t\t\t\t\tbodies.append(tuple(tmp))\n\n\t\tmmd.name = self.config.data_name\n\t\tmmd.headers = headers\n\t\tmmd.bodies = bodies\n\t\treturn mmd", "def _load_mock_bgs_mxxl_file_fits(filename):\n desitarget.io.check_fitsio_version()\n data = fitsio.read(filename,\n columns= ['objid','brickid',\n 'RA','DEC','Z', 'R'])\n\n objid = data['objid'].astype('i8')\n brickid = data['brickid'].astype('i8')\n ra = data['RA'].astype('f8') % 360.0 #enforce 0 < ra < 360\n dec = data['DEC'].astype('f8')\n SDSSr_true = data['R'].astype('f8')\n zred = data['Z'].astype('f8')\n\n return {'objid':objid,'brickid':brickid,\n 'RA':ra, 'DEC':dec, 'Z': zred , \n 'SDSSr_true':SDSSr_true}", "def yomaha2nc4(finame, foname, line_buffer=100000, zlib=False):\n\n MISS_OUT = -999\n\n tic = tm.time()\n\n print \"yomaha2nc4\"\n print \"working with\"\n print finame\n print foname\n\n #=====================================================================\n # Set up the metadata\n #=====================================================================\n\n missing = ['-999.9999' if i in [0, 8, 15, 18, 21]\n else '-99.9999' if i in [1, 9, 16, 19, 22]\n else '-999.999' if i in [3, 10, 17, 20, 23]\n else '-999.9' if i == 2\n else '-999.99' if i in [4, 5, 6, 7, 11, 12, 13, 14]\n else '-128' if i == 27\n else '-999'\n for i in range(28)]\n\n variables = [\n {'name': 'x_deep',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude'},\n {'name': 'y_deep',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude'},\n {'name': 'z_park',\n 'unit': 'dbar',\n 'long_name': 'Parking Pressure'},\n {'name': 't_deep',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time'},\n {'name': 'u_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of zonal deep velocity'},\n {'name': 'v_depth',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of meridional deep velocity'},\n {'name': 'e_u_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of zonal deep velocity'},\n {'name': 'e_v_deep',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of meridional deep velocity'},\n {'name': 'x_surf',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude'},\n {'name': 'y_surf',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude'},\n {'name': 't_surf',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time'},\n {'name': 'u_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of zonal velocity at sea surface'},\n {'name': 'v_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of meridional velocity at sea surface'},\n {'name': 'e_u_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of zonal velocity at sea surface'},\n {'name': 'e_v_surf',\n 'unit': 'cm/s',\n 'long_name': 'Estimate of error of meridional velocity at sea surface'},\n {'name': 'x_last_prev',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the last fix at the surface during previous cycle'},\n {'name': 'y_last_prev',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the last fix at the surface during previous cycle'},\n {'name': 't_last_prev',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the last fix at the surface during previous cycle'},\n {'name': 'x_first',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the first fix at the surface'},\n {'name': 'y_first',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the first fix at the surface'},\n {'name': 
't_first',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the first fix at the surface'},\n {'name': 'x_last',\n 'unit': 'degrees_east',\n 'long_name': 'Longitude of the last fix at the surface'},\n {'name': 'y_last',\n 'unit': 'degrees_north',\n 'long_name': 'Latitude of the last fix at the surface'},\n {'name': 't_last',\n 'unit': 'days since 2000-01-01 0:0:0 UTC',\n 'long_name': 'Time of the last fix at the surface'},\n {'name': 'n_fix',\n 'unit': '',\n 'long_name': 'Number of surface fixes'},\n {'name': 'float_id',\n 'unit': '',\n 'long_name': 'Float ID'},\n {'name': 'n_cycle',\n 'unit': '',\n 'long_name': 'Cycle number'},\n {'name': 'inv_flag',\n 'unit': '',\n 'long_name': 'Time inversion/duplication flag'},\n ]\n\n dtype = [np.int32 if i in [24, 25, 26]\n else np.byte if i == 27\n else np.float32\n for i in range(28)]\n\n #=====================================================================\n # Set up the output file\n #=====================================================================\n var = []\n\n # get file length\n length = 0\n with open(finame, 'r') as fi:\n for line in fi:\n length += 1\n\n # create the out-file\n fo = nc.Dataset(foname, mode='w', format='NETCDF4', clobber=True)\n\n # create dims and vlan data type\n fo.createDimension('id', size=length)\n id_v = fo.createVariable('id', np.int64, 'id',\n zlib=zlib, fill_value=MISS_OUT)\n id_v[:] = range(1, length + 1)\n\n for i in range(len(variables)):\n v_dict = variables[i]\n v_obj = fo.createVariable(v_dict['name'], dtype[i], 'id', zlib=zlib,\n fill_value=missing[i])\n v_obj.units = v_dict['unit']\n v_obj.long_name = v_dict['long_name']\n var.append(v_obj)\n\n #=====================================================================\n # read and write the data\n #=====================================================================\n buf = [[] for i in range(len(variables))]\n idx = 0\n with open(finame, 'r') as fi:\n old_idx = idx\n for line in fi:\n idx += 1\n line = line.strip()\n [buf[i].append(dtype[i](val)) if val != missing[i]\n else buf[i].append(dtype[i](MISS_OUT))\n for i, val in enumerate(line.split())]\n # write chunk to disk and clear buffer\n if np.mod(idx, line_buffer) == 0:\n# id_v[old_idx:idx-1] = range(old_idx + 1,\n# len(buf[i][:]) + old_idx + 1)\n for i in range(len(variables)):\n var[i][old_idx:idx] = np.ma.array(\n buf[i],\n mask=[val == dtype[i](MISS_OUT)\n for val in buf[i]])\n\n old_idx = idx\n buf = [[] for i in range(len(variables))]\n # write last peace to file\n if old_idx != idx:\n# id_v[old_idx:idx - 1] = range(old_idx + 1, len(buf[i][:]) + old_idx + 1)\n for i in range(len(variables)):\n var[i][old_idx:idx] = np.ma.array(buf[i],\n mask=[val == dtype[i](MISS_OUT)\n for val in buf[i]])\n\n #=====================================================================\n # clean up and finish\n #=====================================================================\n fo.close()\n print \"yomaha2nc4 done after % 12.6f seconds\" % (tm.time() - tic)\n\n return None", "def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = 
round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)", "def load_materials(file_data, headers, base_path):\n\n\n def load_material_texture(texture_file):\n filename = os.path.join(base_path, texture_file + \".jpg\")\n try:\n img = bpy.data.images.load(str(filename))\n cTex = bpy.data.textures.new('ColorTex', type = 'IMAGE')\n cTex.image = img\n return cTex\n except:\n print (\"Cannot load image {}\".format(filename))\n return None\n\n\n def material_from_pack(material):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n texture_file_name = material[0].decode(\"utf-8\").replace('\\x00', '').strip()\n return (\n texture_file_name,\n load_material_texture(texture_file_name)\n )\n texture_offset, texture_length = headers[1]\n texture_chunk = Struct(\"64sii\") \n texture_size = texture_chunk.size\n texture_count = int(texture_length / texture_size)\n\n textures = []\n for current_texture_idx in range(texture_count):\n texture_file_position = texture_offset + current_texture_idx * texture_size\n packed_texture = texture_chunk.unpack(file_data[texture_file_position : texture_file_position+texture_size])\n current_texture = material_from_pack(packed_texture)\n textures.append(current_texture)\n \n return textures", "def _load_mock_bgs_mxxl_file_hdf5(filename):\n f = h5py.File(filename)\n ra = f[\"Data/ra\"][...].astype('f8') % 360.0\n dec = f[\"Data/dec\"][...].astype('f8')\n SDSSr_true = f[\"Data/app_mag\"][...].astype('f8')\n zred = f[\"Data/z_obs\"][...].astype('f8')\n f.close()\n\n return {'RA':ra, 'DEC':dec, 'Z': zred ,\n 'SDSSr_true':SDSSr_true}", "def process_raw_data_amld(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import os\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n xMinCarSpeed = -10\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## 
xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n #xdat = str('20') + xFilename[10:16]\n xdat = xDate\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n removeOut = xOut + xCar + \"_\" + xdat + \"_removed.csv\"\n fnLog = xOut + xCar + \"_\" + xdat + \".log\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n #dtime = open(xDir + xFilename).readlines().pop(2).split(',')[0]\n #firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n # int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # firsttime = firstdate.strftime('%s.%f')\n #firsttime = dt_to_epoch(firstdate)\n firsttime = float(open(xDir + xFilename).readlines().pop(2).split(',')[0])\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n ## read in file\n tempFile = pd.read_csv(xDir+xFilename)\n tempFile['DATE'] = tempFile.apply(lambda x: datetime.datetime.fromtimestamp(x.nearest10hz).strftime('%Y-%m-%d'),axis=1)\n tempFile['TIME'] = tempFile.apply(lambda x: datetime.datetime.fromtimestamp(x.nearest10hz).strftime('%H:%M:%S'),axis=1)\n tempFile['SECONDS'] = tempFile.apply(lambda x: int(float(str(x.nearest10hz)[10:])*1e9),axis=1)\n tempFile = tempFile.rename(columns = {'Velocity':'VELOCITY',\n 'Latitude':'LAT',\n 'Longitude':'LONG'})\n tempFile1 = tempFile.copy().sort_values('nearest10hz').reset_index(drop=True)\n\n if bFirst:\n #tempFile.sort_values('nearest10hz').reset_index(drop=True).to_csv(fnOutTemp)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n\n if not bFirst:\n #fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n #fLog.write(\"Processing file: \" + str(xFilename) + \"\\n\")\n\n wind_df4 = tempFile1.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > -1, :]\n wrongSpeed = wind_df4.loc[wind_df4.VELOCITY <= xMinCarSpeed,:]\n wrongSpeed=wrongSpeed.assign(Reason='velocity too slow')\n\n #wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < 1000, :]\n\n wrongSpeed2 = wind_df5.loc[wind_df5.VELOCITY >= xMaxCarSpeed, 
:]\n wrongSpeed2 = wrongSpeed2.assign(Reason='velocity too fast')\n\n wrongSpeeds = pd.concat([wrongSpeed,wrongSpeed2])\n #notGood = pd.concat([wrongSpeeds,nullCH4])\n notGood = pd.concat([wrongSpeeds])\n\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n\n nullCH4 = wind_df4.loc[~wind_df4.CH4.notnull(), :]\n if nullCH4.shape[0] > 0:\n nullCH4 = nullCH4.assign(Reason='CH4 NA')\n removedDF = pd.concat([notGood,nullCH4])\n elif nullCH4.shape[0]==0:\n removedDF = notGood\n wind_df4 = wind_df5.copy()\n\n def rolling_cor(df, first, sec, window, newname):\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n xvals = df.loc[(i - sidewind):(i + sidewind + 1), first]\n yvals = df.loc[(i - sidewind):(i + sidewind + 1), sec]\n cor_i.append(xvals.corr(yvals))\n df.loc[:, newname] = cor_i\n return (df)\n\n def rolling_c2h6(df, colname, window, percentile, newname):\n import numpy as np\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n c2h6vals = df.loc[(i - sidewind):(i + sidewind + 1), colname]\n cor_i.append(np.percentile(c2h6vals, percentile))\n df.loc[:, newname] = cor_i\n return (df)\n\n wind_df5 = rolling_cor(wind_df4,'CH4','C2H6',80,'rollingR_8')\n wind_df6 = rolling_cor(wind_df5,'CH4','C2H6',150,'rollingR_15')\n wind_df7 = rolling_cor(wind_df6,'CH4','C2H6',300,'rollingR_30')\n wind_df8 = rolling_cor(wind_df7,'CH4','C2H6',450,'rollingR_45')\n wind_df9 = rolling_cor(wind_df8,'CH4','C2H6',600,'rollingR_60')\n wind_df10 = rolling_c2h6(wind_df9,'C2H6',300,50,'rollingc2h6_30')\n wind_df11 = rolling_c2h6(wind_df10,'C2H6',150,50,'rollingc2h6_15')\n wind_df12 = rolling_c2h6(wind_df11,'C2H6',450,50,'rollingc2h6_45')\n\n wind_df13 = rolling_c2h6(wind_df12,'CH4',450,50,'rollingch4_45')\n wind_df14 = rolling_c2h6(wind_df13,'CH4',300,50,'rollingch4_30')\n wind_df15 = rolling_c2h6(wind_df14,'CH4',150,50,'rollingch4_15')\n wind_df16 = rolling_c2h6(wind_df15,'CH4',600,50,'rollingch4_60')\n\n\n del(wind_df4)\n wind_df4 = wind_df16.copy()\n ## if you want to filter out high temperatures\n #wind_df4 = wind_df5.loc[wind_df5.TEMPC < 95, :].reset_index(drop=True)\n\n #fLog.write(\"Usable lines - \" + str(wind_df4.shape[0]) + \".\" + \"\\n\")\n #fLog.close()\n\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n removedDF.to_csv(removeOut,index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n removed = pd.read_csv(removeOut)\n pd.concat([removed, removedDF]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(removeOut, index=False)\n\n #os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def process_raw_data_aeris2(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n from datetime import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED 
TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n fnLog = xOut + xCar + \"_\" + xdat + \"_log.csv\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(1).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n woo = row\n bGood = True\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n #seconds = fdate.strftime('%s.%f')\n seconds = dt_to_epoch(fdate)\n def getNS(seconds):\n ns = str(float(seconds) * 1e-3)[11:]\n # str(pd.to_numeric(str(float(seconds) * 1e-3)[11:]) * 100000)[:9]\n return (str(ns).ljust(15, '0'))[:9]\n\n import sys\n if sys.platform.startswith('win'):\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(\n dateob.strftime('%H:%M:%S')) + ',' + str(\n 
int(pd.to_numeric(dateob.strftime('%S.%f')))) + ',' + str(\n pd.to_numeric(dateob.strftime('%f')) * 1000) + str(',')\n csvWrite += str('50') + ',' + str('0') + ',' + str('0') + ',' + str('0') + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(\n lstS[3]) + ',' + str(lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(\n ',') + str(lstS[14])\n if not sys.platform.startswith('win'):\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(\n dateob.strftime('%H:%M:%S')) + ',' + str(str(float(seconds)*1e-3)[:10]) + ',' + getNS(seconds)+ str(',')\n csvWrite += str(lstS[20]) + ',' + str(lstS[15]) + ',' + str(lstS[16]) + ',' + str(\n lstS[17]) + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(\n lstS[3]) + ',' + str(lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(\n ',') + str(lstS[14]) + '\\n'\n fOut.write(csvWrite)\n xCntObs += 1\n infOut.write(str(xFilename) + '\\n')\n fOut.close()\n fLog.close()\n infOut.close()\n # print(xCar + \"\\t\" + xdat + \"\\t\" + fnOut[-22:] + \"\\t\" + str(xCntObs) + \"\\t\" + str(xCntGoodValues) + \"\\t\" + str(\n # gZIP))\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df_not_null = wind_df.loc[wind_df['LAT'].notnull(),].reset_index(drop=True)\n del (wind_df)\n wind_df = wind_df_not_null.copy()\n\n radians = False\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n # wind_df['VELOCITY_calc'] = wind_df.apply(lambda row:calc_velocity(row['timediff'],row['distance']),axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: (str(x.VELOCITY)),axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: 0 if x.VELOCITY == 'XX.X' else x.VELOCITY,axis = 1)\n wind_df['fVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n = 1, expand = True)[0]\n wind_df = wind_df.loc[wind_df['fVel'].notnull(),].reset_index(drop=True)\n wind_df['firstVel'] = wind_df.apply(lambda x: int(x['fVel']),axis = 1)\n\n wind_df['sVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n = 1, expand = True)[1]\n wind_df = wind_df.loc[wind_df['sVel'].notnull(),].reset_index(drop=True)\n 
wind_df['secVel'] = wind_df.apply(lambda x: int(x['sVel']),axis = 1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstVel) + '.' + str(x.secVel)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['VELOCITY','secVel','sVel','fVel','firstVel'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'vloc':'VELOCITY'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n ## CORRECT W WIND THING\n wind_df['W'] = wind_df.apply(lambda x: (str(x.W)),axis=1)\n wind_df['W'] = wind_df.apply(lambda x: 0 if x.W == 'XX.X' else x.W,axis = 1)\n wind_df['fW'] = wind_df[\"W\"].str.split(\".\", n = 1, expand = True)[0]\n #wind_df = wind_df.loc[wind_df['fW'].notnull(),].reset_index(drop=True)\n wind_df['firstW'] = wind_df.apply(lambda x: int(x['fW']),axis = 1)\n wind_df['sW'] = wind_df[\"W\"].str.split(\".\", n = 1, expand = True)[1]\n #wind_df = wind_df.loc[wind_df['sW'].notnull(),].reset_index(drop=True)\n wind_df['secW'] = wind_df.apply(lambda x: int(x['sW']),axis = 1)\n wind_df['wloc'] = wind_df.apply(lambda x: float(str(x.firstW) + '.' + str(x.secW)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['W','secW','sW','fW','firstW'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'wloc':'W'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n\n ## CORRECT U WIND THING\n wind_df['U'] = wind_df.apply(lambda x: (str(x.U)),axis=1)\n wind_df['U'] = wind_df.apply(lambda x: 0 if x.U == 'XX.X' else x.U,axis = 1)\n wind_df['fU'] = wind_df[\"U\"].str.split(\".\", n = 1, expand = True)[0]\n wind_df['firstU'] = wind_df.apply(lambda x: int(x['fU']),axis = 1)\n wind_df['sU'] = wind_df[\"U\"].str.split(\".\", n = 1, expand = True)[1]\n wind_df['secU'] = wind_df.apply(lambda x: int(x['sU']),axis = 1)\n wind_df['uloc'] = wind_df.apply(lambda x: float(str(x.firstU) + '.' + str(x.secU)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['U','secU','sU','fU','firstU'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'uloc':'U'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n\n ## CORRECT V WIND THING\n wind_df['V'] = wind_df.apply(lambda x: (str(x.V)),axis=1)\n wind_df['V'] = wind_df.apply(lambda x: 0 if x.V == 'XX.X' else x.V,axis = 1)\n wind_df['fV'] = wind_df[\"V\"].str.split(\".\", n = 1, expand = True)[0]\n wind_df['firstV'] = wind_df.apply(lambda x: int(x['fV']),axis = 1)\n wind_df['sV'] = wind_df[\"V\"].str.split(\".\", n = 1, expand = True)[1]\n wind_df['secV'] = wind_df.apply(lambda x: int(x['sV']),axis = 1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstV) + '.' 
+ str(x.secV)),axis = 1)\n wind_df2 = wind_df.drop(columns = ['V','secV','sV','fV','firstV'])\n del(wind_df)\n wind_df2 = wind_df2.rename(columns = {'vloc':'V'})\n wind_df = wind_df2.copy()\n del(wind_df2)\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4'], axis=1)\n\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3 = wind_df3.drop(['shift_CH4'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3 = wind_df3.loc[:,\n ['DATE', 'TIME', 'SECONDS', 'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind', 'phi', 'raw_CH4',\n 'distance']]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df4 = wind_df3.copy()\n\n # wind_df7 = add_odometer(wind_df4,'LAT','LONG')\n\n # wind_df4 = wind_df7.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n wind_df4 = wind_df5.copy()\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def read(self, fname):\r\n self.header = {}\r\n self.resetvals()\r\n infile = self._open(fname, \"rb\")\r\n self._readheader(infile)\r\n # Compute image size\r\n try:\r\n self.dim1 = int(self.header['NumberOfRowsInFrame'])\r\n self.dim2 = int(self.header['NumberOfColsInFrame'])\r\n self.bpp = int(self.header['BitsPerPixel'])\r\n except:\r\n raise Exception(\"GE file\", str(fname) + \\\r\n \"is corrupt, cannot read it\")\r\n\r\n # More than one image can be saved in a GE file\r\n # Will only load the first one\r\n\r\n\r\n # Go to the beginning of the file\r\n infile.seek(0)\r\n infile.seek(self.header['HeaderSizeInBytes'] + self.header['UserHeaderSizeInBytes'])\r\n\r\n ReadBytes = self.dim1 * self.dim2 * (self.bpp / 8)\r\n block = infile.read(ReadBytes)\r\n block = N.fromstring(block, N.uint16)\r\n\r\n infile.close()\r\n\r\n try:\r\n self.data = N.reshape(block, [self.dim2, self.dim1])\r\n except:\r\n 
print len(block), self.dim2, self.dim1\r\n raise IOError, \\\r\n 'Size spec in GE-header does not match size of image data field'\r\n\r\n self.bytecode = self.data.dtype.type\r\n self.pilimage = None\r\n return self", "def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = 
cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? 
back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def Load(self):\n\t\tfile = open(self.fileName, 'r')\n\t\tself.hdr = file.readline().split('\\n')[0].split(',')\n\t\t\n\t\tfor line in file.readlines():\n\t\t\ttokens = line.split('\\n')[0].split(',')\n\t\t\tif int(tokens[1]) == 0:\n\t\t\t\tself.h0.append(tokens[0])\n\t\t\telse:\n\t\t\t\tself.h1.append(tokens[0])\n\t\tfile.close()\n\t\tself.numH1 = len(self.h1)\n\t\tself.numH0 = len(self.h0)", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def process_raw_data_aeris_maybe(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift,\n maxSpeed='45',\n minSpeed='2'):\n import os\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n xMinCarSpeed = -10\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time Stamp,Inlet Number,P (mbars),T (degC),CH4 
(ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n import pandas as pd\n\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n # xdat = str('20') + xFilename[10:16]\n xdat = xDate\n fnOut = xOut + xCar + \"_\" + '20'+ xdat + \"_dat.csv\" # set CSV output for raw data\n removeOut = xOut + xCar + \"_\" + '20'+ xdat + \"_removed.csv\"\n fnLog = xOut + xCar + \"_\" + '20'+ xdat + \".log\" # output for logfile\n infOut = xOut + xCar + \"_\" +'20'+ xdat + \"_info.csv\"\n #\n\n # dtime = open(xDir + xFilename).readlines().pop(2).split(',')[0]\n # firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n # int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # firsttime = firstdate.strftime('%s.%f')\n # firsttime = dt_to_epoch(firstdate)\n\n #firsttime = float(open(xDir + xFilename).readlines().pop(2).split(',')[0])\n\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n ## read in file\n\n tempFile = pd.read_csv(xDir + \"/\" + xFilename, delimiter=\",\")\n #dtime = tempFile.loc[2,'Time Stamp'].split(',')[0]\n\n #firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n # int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n\n tempFile['dtime'] = tempFile.apply(lambda x: x['Time Stamp'].split(',')[0],axis=1)\n tempFile['datime'] = tempFile.apply(lambda x: datetime.datetime(int(x.dtime[6:10]), int(x.dtime[0:2]), int(x.dtime[3:5]), int(x.dtime[11:13]),\n int(x.dtime[14:16]), int(x.dtime[17:19]), int(float(x.dtime[19:23]) * 1000000)),axis=1)\n tempFile['DATE'] = tempFile.apply(lambda x: x.datime.strftime('%Y-%m-%d'),axis=1)\n tempFile['TIME'] = tempFile.apply(lambda x: x.datime.strftime('%H:%M:%S'),axis=1)\n tempFile['nearest10hz'] = tempFile.apply(lambda x: round(float(x.datime.timestamp()),1),axis=1)\n tempFile['SECONDS'] = tempFile.apply(lambda x: int(float(str(x.nearest10hz)[11:]) * 1e9), axis=1)\n tempFile1 = tempFile.copy().sort_values(by='nearest10hz',ascending=True).reset_index(drop=True)\n tempFile1['nearest10hz'] = tempFile1.loc[:,'nearest10hz'].astype(float)\n tempFile1['nearest10hz'] = tempFile1.loc[:,'nearest10hz'].astype(str)\n\n del(tempFile)\n tempFile = tempFile1.copy()\n\n tempFile = tempFile.rename(columns={\n 'T (degC)':'T',\n 'Inlet Number':'inletNumber',\n 'P (mbars)':'P',\n 'CH4 (ppm)':'CH4',\n 'H2O (ppm)':'H2O',\n 'C2H6 (ppb)':'C2H6',\n 'C2/C1':'C1C2',\n 'Battery Charge (V)':'batteryCharge',\n 'Power Input (mV)':'powerInput',\n 'Current (mA)':'current',\n 'SOC (%)':'SOC','Time Stamp':'TimeStamp',\n 'Compass (deg)':'CompassDeg',\n 'Speed 
(m/sec)':'ws',\n 'Dir (deg)': 'winddir',\n 'U (m/sec)':'U',\n 'V (m/sec)': 'V',\n 'Latitude':'LAT',\n 'Longitude':'LONG',\n 'W (m/sec)':'W'})\n #tempFile1 = tempFile.copy().sort_values('nearest10hz').reset_index(drop=True)\n radians = False\n\n wind_df_temp = tempFile.copy()\n wind_df_temp['ttot'] = wind_df_temp.apply(lambda x: float(x.nearest10hz),axis=1)\n wind_df = wind_df_temp.copy().sort_values(by='ttot',ascending=True).reset_index(drop=True)\n\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.ttot.shift(periods=1)\n wind_df['next_TIME'] = wind_df.ttot.shift(periods=-1)\n\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda row:calc_velocity(row['timediff'],row['distance']),axis=1)\n\n\n try:\n wind_df['W'] = wind_df['W'].astype(float)\n except:\n wind_df_other = wind_df.copy()\n wind_df_other['W'] = wind_df_other.apply(lambda x: 0 if x.W == 'XX.X' else x.W, axis=1)\n wind_df_other['fW'] = wind_df_other[\"W\"].str.split(\".\", n=1, expand=True)[0]\n wind_df_other1 = wind_df_other.loc[wind_df_other['fW'].notnull(),].reset_index(drop=True)\n wind_df_other1['firstW'] = wind_df_other1.apply(lambda x: int(x['fW']), axis=1)\n wind_df_other1['sW'] = wind_df_other1[\"W\"].str.split(\".\", n=1, expand=True)[1]\n wind_df_other2 = wind_df_other1.loc[wind_df_other1['sW'].notnull(),].reset_index(drop=True)\n wind_df_other2['secW'] = wind_df_other2.apply(lambda x: int(x['sW']), axis=1)\n wind_df_other2['wloc'] = wind_df_other2.apply(lambda x: float(str(x.firstW) + '.' + str(x.secW)), axis=1)\n wind_df_other3 = wind_df_other2.drop(columns=['W', 'secW', 'sW', 'fW', 'firstW'])\n del (wind_df)\n wind_df4 = wind_df_other3.rename(columns={'wloc': 'W'})\n wind_df = wind_df4.copy()\n del (wind_df4)\n\n try:\n wind_df['U'] = wind_df['U'].astype(float)\n except:\n wind_df_other = wind_df.copy()\n wind_df_other['U'] = wind_df_other.apply(lambda x: 0 if x.U == 'XX.X' else x.U, axis=1)\n wind_df_other['fU'] = wind_df_other[\"U\"].str.split(\".\", n=1, expand=True)[0]\n wind_df_other1 = wind_df_other.loc[wind_df_other['fU'].notnull(),].reset_index(drop=True)\n wind_df_other1['firstU'] = wind_df_other1.apply(lambda x: int(x['fU']), axis=1)\n wind_df_other1['sU'] = wind_df_other1[\"U\"].str.split(\".\", n=1, expand=True)[1]\n wind_df_other2 = wind_df_other1.loc[wind_df_other1['sU'].notnull(),].reset_index(drop=True)\n wind_df_other2['secU'] = wind_df_other2.apply(lambda x: int(x['sU']), axis=1)\n wind_df_other2['uloc'] = wind_df_other2.apply(lambda x: float(str(x.firstU) + '.' 
+ str(x.secU)), axis=1)\n wind_df_other3 = wind_df_other2.drop(columns=['U', 'secU', 'sU', 'fU', 'firstU'])\n del (wind_df)\n wind_df4 = wind_df_other3.rename(columns={'uloc': 'U'})\n wind_df = wind_df4.copy()\n del (wind_df4)\n\n try:\n wind_df['V'] = wind_df['V'].astype(float)\n except:\n wind_df_other = wind_df.copy()\n wind_df_other['V'] = wind_df_other.apply(lambda x: 0 if x.V == 'XX.X' else x.V, axis=1)\n wind_df_other['fV'] = wind_df_other[\"V\"].str.split(\".\", n=1, expand=True)[0]\n wind_df_other1 = wind_df_other.loc[wind_df_other['fV'].notnull(),].reset_index(drop=True)\n wind_df_other1['firstV'] = wind_df_other1.apply(lambda x: int(x['fV']), axis=1)\n wind_df_other1['sV'] = wind_df_other1[\"V\"].str.split(\".\", n=1, expand=True)[1]\n wind_df_other2 = wind_df_other1.loc[wind_df_other1['sV'].notnull(),].reset_index(drop=True)\n wind_df_other2['secV'] = wind_df_other2.apply(lambda x: int(x['sV']), axis=1)\n wind_df_other2['vloc'] = wind_df_other2.apply(lambda x: float(str(x.firstV) + '.' + str(x.secV)), axis=1)\n wind_df_other3 = wind_df_other2.drop(columns=['V', 'secV', 'sV', 'fV', 'firstV'])\n del (wind_df)\n wind_df4 = wind_df_other3.rename(columns={'vloc': 'V'})\n wind_df = wind_df4.copy()\n del (wind_df4)\n\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n\n wind_df['adj_v'] = wind_df.apply(lambda row: -row['horz_length'] * np.cos(row['adj_theta']), axis=1)\n wind_df['adj_u'] = wind_df.apply(lambda row: row['horz_length'] * np.sin(row['adj_theta']), axis=1)\n\n ## GO THROUGH WIND\n window_size = 30\n u_series = pd.Series(wind_df['adj_u'])\n u_windows = u_series.rolling(window_size)\n u_averages = pd.DataFrame(u_windows.mean())\n u_averages.columns = ['U_avg']\n u_averages['key'] = u_averages.index\n\n v_series = pd.Series(wind_df['adj_v'])\n v_windows = v_series.rolling(window_size)\n v_averages = pd.DataFrame(v_windows.mean())\n v_averages.columns = ['V_avg']\n v_averages['key'] = v_averages.index\n\n w_series = pd.Series(wind_df['W'])\n w_windows = w_series.rolling(window_size)\n w_averages = pd.DataFrame(w_windows.mean())\n w_averages.columns = ['W_avg']\n w_averages['key'] = w_averages.index\n\n vw_df = w_averages.set_index('key').join(v_averages.set_index('key'))\n vw_df['key'] = vw_df.index\n uvw_df = vw_df.set_index('key').join(u_averages.set_index('key'))\n uvw_df['key'] = uvw_df.index\n wind_df2 = wind_df.copy()\n wind_df2['key'] = wind_df2.index\n wind_df = uvw_df.set_index('key').join(wind_df2.set_index('key'))\n\n wind_df['r_avg'] = wind_df.apply(lambda row: np.sqrt(row['U_avg'] ** 2 + row['V_avg'] ** 2), axis=1)\n wind_df['theta_avg'] = wind_df.apply(lambda row: np.arctan(-row['U_avg'] / row['V_avg']), axis=1)\n\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['CH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = 
wind_df.loc[:, ['shift_CH4']]\n\n wind_df['shift_R'] = wind_df.R.shift(periods=int(float(shift)))\n wind_df['raw_R'] = wind_df.apply(lambda row: row['R'], axis=1)\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4', 'R'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3['R'] = wind_df3.loc[:, 'shift_R']\n wind_df3 = wind_df3.drop(['shift_CH4', 'shift_R'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n\n if bFirst:\n # tempFile.sort_values('nearest10hz').reset_index(drop=True).to_csv(fnOutTemp)\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n\n if not bFirst:\n # fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n # fLog.write(\"Processing file: \" + str(xFilename) + \"\\n\")\n\n wind_df4 = wind_df3.copy()\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > -1, :]\n wrongSpeed = wind_df4.loc[wind_df4.VELOCITY <= xMinCarSpeed, :]\n wrongSpeed = wrongSpeed.assign(Reason='velocity too slow')\n\n # wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < 1000, :]\n\n wrongSpeed2 = wind_df5.loc[wind_df5.VELOCITY >= xMaxCarSpeed, :]\n wrongSpeed2 = wrongSpeed2.assign(Reason='velocity too fast')\n\n wrongSpeeds = pd.concat([wrongSpeed, wrongSpeed2])\n # notGood = pd.concat([wrongSpeeds,nullCH4])\n notGood = pd.concat([wrongSpeeds])\n\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n\n nullCH4 = wind_df4.loc[~wind_df4.CH4.notnull(), :]\n if nullCH4.shape[0] > 0:\n nullCH4 = nullCH4.assign(Reason='CH4 NA')\n removedDF = pd.concat([notGood, nullCH4])\n elif nullCH4.shape[0] == 0:\n removedDF = notGood\n wind_df4 = wind_df5.copy()\n\n def rolling_cor(df, first, sec, window, newname):\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n try:\n cor_i.append(xvals.corr(yvals))\n xvals = df.loc[(i - sidewind):(i + sidewind + 1), first]\n yvals = df.loc[(i - sidewind):(i + sidewind + 1), sec]\n except:\n cor_i.append(-2)\n\n df.loc[:, newname] = cor_i\n return (df)\n def rolling_c2h6(df, colname, window, percentile, newname):\n import numpy as np\n if (window % 2 == 1):\n sidewind = (window - 1) / 2\n else:\n sidewind = window / 2\n\n length = df.shape[0]\n cor_i = []\n for i in range(length):\n if (((i) < sidewind) or (i >= (length - sidewind))):\n cor_i.append(0)\n else:\n try:\n c2h6vals = df.loc[(i - sidewind):(i + sidewind + 1), colname]\n cor_i.append(np.percentile(c2h6vals, percentile))\n except:\n cor_i.append(-999)\n df.loc[:, newname] = cor_i\n return (df)\n\n wind_df5 = rolling_cor(wind_df4, 'CH4', 'C2H6', 8, 'rollingR_8')\n wind_df6 = rolling_cor(wind_df5, 'CH4', 'C2H6', 15, 'rollingR_15')\n wind_df7 = rolling_cor(wind_df6, 'CH4', 'C2H6', 30, 'rollingR_30')\n wind_df8 = rolling_cor(wind_df7, 'CH4', 'C2H6', 45, 'rollingR_45')\n wind_df9 = rolling_cor(wind_df8, 'CH4', 'C2H6', 60, 'rollingR_60')\n\n wind_df10 = rolling_c2h6(wind_df9, 'C2H6', 30, 50, 'rollingc2h6_30')\n wind_df11 = 
rolling_c2h6(wind_df10, 'C2H6', 15, 50, 'rollingc2h6_15')\n wind_df12 = rolling_c2h6(wind_df11, 'C2H6', 45, 50, 'rollingc2h6_45')\n\n wind_df13 = rolling_c2h6(wind_df12, 'CH4', 45, 50, 'rollingch4_45')\n wind_df14 = rolling_c2h6(wind_df13, 'CH4', 30, 50, 'rollingch4_30')\n wind_df15 = rolling_c2h6(wind_df14, 'CH4', 15, 50, 'rollingch4_15')\n wind_df16 = rolling_c2h6(wind_df15, 'CH4', 60, 50, 'rollingch4_60')\n\n del (wind_df4)\n wind_df4 = wind_df16.copy()\n ## if you want to filter out high temperatures\n # wind_df4 = wind_df5.loc[wind_df5.TEMPC < 95, :].reset_index(drop=True)\n\n # fLog.write(\"Usable lines - \" + str(wind_df4.shape[0]) + \".\" + \"\\n\")\n # fLog.close()\n\n if bFirst:\n wind_df4.sort_values(by='ttot',ascending=True).reset_index(drop=True).to_csv(fnOut, index=False)\n removedDF.to_csv(removeOut, index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='ttot',ascending=True).reset_index(drop=True).to_csv(fnOut, index=False)\n removed = pd.read_csv(removeOut)\n pd.concat([removed, removedDF]).sort_values(by='ttot',ascending=True).reset_index(drop=True).to_csv(removeOut,\n index=False)\n\n # os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def __init__(self,datafile,crop=False,usepickled=True,upsample=None,medianfilt=False,remove_rohr=False,dtype=None):\n if not isinstance(datafile,str) and not isinstance(datafile, unicode):\n self.D = datafile\n return\n self.info = WurzelInfo(datafile)\n info = self.info\n try:\n os.mkdir(os.path.join(info.datapath, datafile.replace(\".dat\",\"\")))\n except OSError:pass\n if not info.has_rohr: remove_rohr = False\n if not info.has_rohr: medianfilt = False\n\n picklename = os.path.join(info.datapath, datafile.replace(\".dat\",\"\"), \"upsampled.pickle\")\n #if usepickled and os.path.exists(picklename):\n if os.path.exists(picklename):\n self.load(picklename)\n if not all([x==y for x,y in zip(self.D.shape, info.shape )]):\n print \"After loading pickle, dimensions do not match: \", self.D.shape, info.shape\n import sys\n sys.exit(1)\n else:\n try:\n with open(os.path.join(info.datapath, datafile)) as fd:\n self.D = np.fromfile(file=fd, dtype=info.read_dtype).reshape(info.read_shape).astype(\"float32\")\n except:\n with open(os.path.join(info.datapath, datafile)) as fd:\n self.D = np.fromfile(file=fd, dtype=dtype).reshape(info.shape).astype(\"float32\")\n if info.read_dtype in [np.uint8, \"uint8\"]:\n self.D /= 255.0\n if medianfilt: self.median_filter()\n if remove_rohr: self.get_rid_of_roehrchen()\n #assert self.D.min()>= 0\n self.D[self.D<0]=0\n self.upsample(upsample)\n if not medianfilt:\n cnt = (self.D<0).sum()\n print \"fraction below zero: \", cnt/np.prod(self.D.shape)\n self.D[self.D<0]=0 # this is an upsampling-artefact (hopefully)\n if not all([x==y for x,y in zip(self.D.shape, info.shape )]):\n print \"After resampling, dimensions do not match: \", self.D.shape, info.shape\n import sys\n sys.exit(1)\n\n if medianfilt or remove_rohr or upsample:\n print \"Saving upsampled as \", picklename\n self.save(picklename)", "def read_meta(self):\n meta = cPickle.load(open('../sugar_analysis_data/META-CABALLO2.pkl'))\n self.meta_sn_name_list = []\n self.meta_zcmb = []\n self.meta_x0 =[]\n self.meta_x0_err = []\n self.meta_x1 =[]\n self.meta_x1_err = []\n self.meta_c = []\n self.meta_c_err = []\n self.meta_mb = []\n self.meta_mb_err = []\n self.meta_cov_x0_x1 = [] \n self.meta_cov_x0_c = []\n self.meta_cov_x1_c = []\n self.meta_cov_mb_x1 = []\n self.meta_cov_mb_c = [] 
\n self.meta_zhl = []\n self.meta_zhl_err = []\n self.meta_idr = []\n for meta_sn_name in meta.keys(): \n \n if meta[meta_sn_name]['idr.subset'] != 'bad' and meta[meta_sn_name]['idr.subset'] != 'auxiliary':\n \n self.meta_sn_name_list.append(meta_sn_name)\n self.meta_zhl_err.append(meta[meta_sn_name]['host.zhelio.err'])\n self.meta_zhl.append(meta[meta_sn_name]['host.zhelio'])\n self.meta_zcmb.append(meta[meta_sn_name]['host.zcmb'])\n self.meta_x0.append(meta[meta_sn_name]['salt2.X0'])\n self.meta_x0_err.append(meta[meta_sn_name]['salt2.X0.err'])\n self.meta_x1.append(meta[meta_sn_name]['salt2.X1'])\n self.meta_x1_err.append(meta[meta_sn_name]['salt2.X1.err'])\n self.meta_c.append(meta[meta_sn_name]['salt2.Color'])\n self.meta_c_err.append(meta[meta_sn_name]['salt2.Color.err'])\n self.meta_mb.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B'])\n self.meta_mb_err.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B.err'])\n self.meta_cov_x0_x1.append(meta[meta_sn_name]['salt2.CovX0X1'])\n self.meta_cov_x0_c.append(meta[meta_sn_name]['salt2.CovColorX0'])\n self.meta_cov_x1_c.append(meta[meta_sn_name]['salt2.CovColorX1'])\n self.meta_cov_mb_x1.append(meta[meta_sn_name]['salt2.CovRestFrameMag_0_BX1'])\n self.meta_cov_mb_c.append(meta[meta_sn_name]['salt2.CovColorRestFrameMag_0_B'])\n self.meta_idr.append(meta[meta_sn_name]['idr.subset'])\n \n self.meta_idr = np.array(self.meta_idr)\n self.meta_zcmb = np.array(self.meta_zcmb)\n self.meta_zhl = np.array(self.meta_zhl)\n self.meta_zhl_err = np.array(self.meta_zhl_err)\n self.meta_x0 = np.array(self.meta_x0)\n self.meta_x0_err = np.array(self.meta_x0_err)\n self.meta_x1 = np.array(self.meta_x1)\n self.meta_x1_err = np.array(self.meta_x1_err) \n self.meta_c = np.array(self.meta_c)\n self.meta_c_err = np.array(self.meta_c_err)\n self.meta_mb = np.array(self.meta_mb)\n self.meta_mb_err = np.array(self.meta_mb_err)\n self.meta_cov_x0_x1 = np.array(self.meta_cov_x0_x1)\n self.meta_cov_x0_c = np.array(self.meta_cov_x0_c)\n self.meta_cov_x1_c = np.array(self.meta_cov_x1_c)\n self.meta_cov_mb_x1 = np.array(self.meta_cov_mb_x1)\n self.meta_cov_mb_c = np.array(self.meta_cov_mb_c)", "def load_data(self):", "def read_mb_file(self,idir='.',ifile=None, gmt=True, verbose=False):\n \n import numpy as np\n import os\n \n if gmt==True:\n gmt_file=idir+'/../maps/en_velo.gmt'\n if isinstance(gmt,str):\n gmt_file=gmt\n \n if gmt != False:\n self.read_lon_lat(gmt_file,verbose=verbose)\n \n if ifile is None:\n mb_file_basename= idir + '/mb_'+self.code+'_GPS.dat'\n else:\n mb_file_basename=ifile\n \n data_NEU = []\n for i in range(1,4):\n mb_file = mb_file_basename + str(i)\n\n # file\n self.ifile=os.path.abspath(mb_file)\n \n data=np.genfromtxt(mb_file,skip_header=4)\n \n # reshape to ensure a 2D array\n if len(data.shape)==1:\n data=data.reshape((1,data.shape[0]))\n \n\n\n data_NEU.append(data)\n\n if data_NEU[0].shape == data_NEU[1].shape == data_NEU[2].shape:\n self.data=np.zeros((data_NEU[0].shape[0],7))\n self.data[:,0]=data_NEU[0][:,0]\n self.data[:,1]=data_NEU[0][:,1]#*to_mm\n self.data[:,2]=data_NEU[1][:,1]#*to_mm\n self.data[:,3]=data_NEU[2][:,1]#*to_mm\n\n self.data[:,4]=data_NEU[0][:,2]#*to_mm\n self.data[:,5]=data_NEU[1][:,2]#*to_mm\n self.data[:,6]=data_NEU[2][:,2]#*to_mm\n\n else: \n print(\"!!! 
Error reading \",mb_file_basename,\" :*dat1, *dat2, *dat3 do not have the same length\")\n self.data = None", "def updateHeaderComputedValues( self ):\n self.nAvgBytesPerSec = int( self.nNbrChannel*self.nSamplingRate*self.nNbrBitsPerSample/8 )\n self.nSizeBlockAlign = int( self.nNbrChannel*self.nNbrBitsPerSample/8 )\n self.dataType = Wav.getDataType( self.nNbrBitsPerSample )", "def read_data_small(filename):\n with bz2.BZ2File(filename) as f:\n data = []\n file_size = os.stat(filename).st_size\n chunk_size = 1024 * 1024 # 限制读取的数据\n print('Reading data...')\n for i in range(int(ceil(file_size // chunk_size) + 1)):\n bytes_to_read = min(chunk_size, file_size - (i * chunk_size))\n file_string = f.read(bytes_to_read).decode('utf-8')\n file_string = file_string.lower()\n file_string = nltk.word_tokenize(file_string) # nltk 提供的分词器\n data.extend(file_string)\n return data", "def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'", "def __init__(self, path, **kwargs):\n super().__init__(path, **kwargs)\n\n self._largeImagePath = self._getLargeImagePath()\n self._lastOpenSourceLock = threading.RLock()\n # 'c' must be first as channels are special because they can have names\n self._axesList = ['c', 'z', 't', 'xy']\n if not os.path.isfile(self._largeImagePath):\n try:\n possibleYaml = self._largeImagePath.split('multi://', 1)[-1]\n self._info = yaml.safe_load(possibleYaml)\n self._validator.validate(self._info)\n self._basePath = Path('.')\n except Exception:\n raise TileSourceFileNotFoundError(self._largeImagePath) from None\n else:\n try:\n with builtins.open(self._largeImagePath) as fptr:\n start = fptr.read(1024).strip()\n if start[:1] not in ('{', '#', '-') and (start[:1] < 'a' or start[:1] > 'z'):\n msg = 'File cannot be opened via multi-source reader.'\n raise TileSourceError(msg)\n fptr.seek(0)\n try:\n import orjson\n self._info = orjson.loads(fptr.read())\n except Exception:\n fptr.seek(0)\n self._info = yaml.safe_load(fptr)\n except (json.JSONDecodeError, yaml.YAMLError, UnicodeDecodeError):\n msg = 'File cannot be opened via multi-source reader.'\n raise TileSourceError(msg)\n try:\n self._validator.validate(self._info)\n except jsonschema.ValidationError:\n msg = 'File cannot be validated via multi-source reader.'\n raise TileSourceError(msg)\n self._basePath = Path(self._largeImagePath).parent\n self._basePath /= Path(self._info.get('basePath', '.'))\n for axis in self._info.get('axes', []):\n if axis not in self._axesList:\n self._axesList.append(axis)\n self._collectFrames()", "def read_data(self, f):\n\n f.seek(self.offset)\n # assume files are small enough to fit in memory\n data = f.read(self.compressed_size)\n if self.type == 0:\n return data\n elif self.type == 1:\n return gzip.decompress(data)\n elif self.type == 2:\n n, = struct.unpack('<L', data[:4])\n target = data[4:4+n].rstrip(b'\\0').decode('utf-8')\n logger.debug(f\"file redirection: {target}\")\n return None\n elif self.type == 3:\n return zstd_decompress(data)\n raise ValueError(f\"unsupported file type: {self.type}\")", "def analyze_pressure_dump(filename, Lx=200., Ly=200, Lz=900., N=10, 
bin_divide_flag=False, Natoms=113579):\n myfile = open(filename+'.txt')\n trajectory = []\n traj_pd = []\n frames = []\n\n for _ in range(3):\n next(myfile)\n count = 0\n while EOF(myfile):\n count += 1\n s = next(myfile) # info with the time step\n\n x = np.zeros(N, dtype=[('Chunk',np.float32), ('Coord1',np.float32), ('Ncount',np.float32), ('density',np.float32), ('temp',np.float32), ('vx',np.float32), ('fx',np.float32),('c_pciKE[1]',np.float32), ('c_pciKE[2]',np.float32), ('c_pciKE[3]',np.float32), ('c_pciVIR[1]',np.float32), ('c_pciVIR[2]',np.float32), ('c_pciVIR[3]',np.float32), ('c_pgelELAS[1]',np.float32), ('c_pgelELAS[2]',np.float32), ('c_pgelELAS[3]',np.float32), ('c_pgelVIR[1]', np.float32), ('c_pgelVIR[2]', np.float32), ('c_pgelVIR[3]', np.float32), ('c_pgelPAIR[1]', np.float32), ('c_pgelPAIR[2]', np.float32), ('c_pgelPAIR[3]', np.float32)])\n\n# Chunk Coord1 Ncount density/number temp vx fx c_pciKE[1] c_pciKE[2] c_pciKE[3] c_pciVIR[1] c_pciVIR[2] c_pciVIR[3] c_pgelELAS[1] c_pgelELAS[2] c_pgelELAS[3] c_pgelVIR[1] c_pgelVIR[2] c_pgelVIR[3] c_pgelPAIR[1] c_pgelPAIR[2] c_pgelPAIR[3]\n\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n frame, _, _ = list_line\n frames.append(int(frame))\n # print( \"reading lines\")\n\n for i in xrange(N):\n count += 1\n s = next(myfile)\n list_line = re.findall(\"[-+]?\\d+[\\.]?\\d*[eE]?[-+]?\\d*\", s)\n # print( \"reading line\", i, list_line)\n for il, l in enumerate(list_line):\n x[i][il] = float(l)\n\n trajectory.append(x)\n\n # names = x.dtype.fields.keys()\n # data = x.dtype.fields.values()\n\n df = pd.DataFrame.from_records(x)\n traj_pd.append(df)\n\n myfile.close()\n\n\n\n # # volume = 218.*44.*44.\n volume = Lx*Ly*Lz\n # N_atoms = 113579\n # if bin_divide_flag:\n # bin_volume = volume / float(N)\n # else:\n # bin_volume = 1.\n\n bin_volume = volume / float(N)\n # bin_volume = volume\n # bin_volume /= float(Natoms)\n\n Combine_PD = pd.concat(traj_pd)\n FINAL_PD = pd.DataFrame()\n\n FINAL_PD['Coord1'] = Combine_PD['Coord1']\n FINAL_PD['p_ciKE'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciKE[1]'] + Combine_PD['c_pciKE[2]'] + Combine_PD['c_pciKE[3]'])/(3.*bin_volume)\n FINAL_PD['p_ciVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pciVIR[1]'] + Combine_PD['c_pciVIR[2]'] + Combine_PD['c_pciVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelELAS'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelELAS[1]'] + Combine_PD['c_pgelELAS[2]'] + Combine_PD['c_pgelELAS[3]'])/(3.*bin_volume)\n\n FINAL_PD['p_gelVIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelVIR[1]'] + Combine_PD['c_pgelVIR[2]'] + Combine_PD['c_pgelVIR[3]'])/(3.*bin_volume)\n FINAL_PD['p_gelPAIR'] = -1 * Combine_PD['Ncount'] * (Combine_PD['c_pgelPAIR[1]'] + Combine_PD['c_pgelPAIR[2]'] + Combine_PD['c_pgelPAIR[3]'])/(3.*bin_volume)\n\n # So now I have to\n # P_bin = (sigma_per_atom_xx + ... 
+ sigma_per_atom_zz)/(bin_volume*3)\n # *N_atoms_per_bin\n # N_atoms_per_bin = number_density*N_atoms\n\n\n df_concat = FINAL_PD\n\n by_row_index = df_concat.groupby(df_concat.index)\n df_means = by_row_index.mean()\n by_row_index_2 = df_concat.groupby(df_concat.index)\n df_stds = by_row_index_2.std()\n\n # print( df_means.head())\n # print( df_stds.head())\n return df_means, df_stds", "def __init__(self, resolver_context, path_spec):\n super(CompressedStreamFileSystem, self).__init__(\n resolver_context, path_spec)\n self._compression_method = None", "def runTest(self):\n nc = Dataset(self.file)\n data = nc['vl'][-1]\n # check max error of compression\n err = np.abs(data - self.data)\n assert(err.max() < nc['vl'].scale_factor)\n # turn off auto-scaling\n nc.set_auto_maskandscale(False)\n data = nc['vl'][-1]\n assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))\n nc.close()", "def __init__(self, fpath, prefix, suffix, globar_year=None):\n self.fpath = fpath\n prefix_len = len(prefix)\n suffix_len = len(suffix)\n self.year = globar_year\n self.year_str = str(globar_year)\n\n # filename example : good_2005_0.pgz\n files = [f for f in listdir(fpath) if isfile(join(fpath, f)) and\n (f[-suffix_len:] == suffix and f[:prefix_len] == prefix)]\n\n pat = '{0}_{1}'.format(prefix, self.year_str)\n last_batch = \\\n list(sorted(filter(lambda f: pat in f, files)))\n first_batch = \\\n list(sorted(filter(lambda f: pat not in f, files)))\n\n logging.info('in ChunkReader.__init__() : '\n '(last_batch) {0} files : {1}'.format(len(last_batch), ' '.join(last_batch)))\n logging.info('in ChunkReader.__init__() : '\n '(first_batch) {0} files : {1}'.format(len(first_batch), ' '.join(first_batch)))\n\n first_batch.extend(last_batch)\n # queue-like usage\n self.files = first_batch[::-1]\n logging.info('in ChunkReader.__init__ : '\n 'all files {0} files : {1}'.format(len(self.files), ' '.join(self.files)))", "def parse_file(self, path, max_resolution, threshold, proteins={}):\n\n \"\"\"\n create regex pattern here so it is not done repeatedly while parsing file\n\n groups:\n 0 - Protein ID\n 1 - Chain ID\n 2 - Length of protein chain\n 3 - Exptl.\n 4 - Resolution\n 5 - R-factor\n 6 - FreeRValue\n \"\"\"\n regex_str = '(\\w{4})(\\w)\\s+(\\d+)\\s+(\\w+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)'\n regex_pattern = re.compile(regex_str)\n\n printc('Processing: %s' % path)\n\n raw = None\n try:\n _file = gzip.open(path, 'r')\n\n #first line is labels, discard it\n _file.readline()\n\n for line in _file:\n match = regex_pattern.match(line)\n if match:\n groups = match.groups()\n\n if groups[0] in proteins:\n # if protein already exists just update the additional\n # chain information. 
The properties should not change\n # between records in the selection file.\n protein = proteins[groups[0]]\n if not groups[1] in protein['chains']:\n protein['chains'].append(groups[1])\n #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold)\n\n else:\n # protein is not in proteins dict yet create initial\n # structure from parsed properties.\n resolution = float(groups[4])\n if resolution > 0 and resolution <= max_resolution:\n proteins[groups[0]] = {\n 'code':groups[0],\n 'chains':[groups[1]],\n 'resolution':groups[4],\n 'rfactor':groups[5],\n 'rfree':groups[6],\n 'threshold':threshold\n }\n\n #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold)\n\n finally:\n if _file:\n _file.close()\n\n return proteins", "def test_large_import(self):\n # the original file (from the IDD) is a previous version of the file from\n # the data server for the gp03flmb platform\n self.create_sample_data_set_dir('node59p1_orig.dat', TELEM_DIR, 'node59p1.dat')\n self.assert_initialize()\n # one bad sample in here:\n # PH1236501_01D5u51F361E0_EC_162E has non ascii bytes at the end and is missing \\r\n result = self.data_subscribers.get_samples(DataParticleType.CONTROL, 1, 60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 49, 100)\n\n # this file is the more recent file off the data server for gp03flmb/d00001\n # this file appends more data to that in node59p1_orig\n self.create_sample_data_set_dir('node59p1.dat', TELEM_DIR)\n # several bad samples in here:\n # PH1236501_01D5u521208B4_A1_D274 doesn't have enough bytes (469 not 470)\n # PH1236501_01D5u52461BDC_CF_55BD doesn't have enough bytes (469 not 470)\n # PH1236501_01D5u5266BCF1_DA_6466 doesn't have enough bytes (469 not 470)\n # PH1236501_01DAu5288AF85_C9_7365, PH1236501_01DAu529E1BDF_42_4835\n # have extra bytes after the sample, not an error anymore\n # PH1236501_01D5u52B090DA_BA_8CC1 doesn't have enough bytes (469 not 470)\n # PH1236501_01DAu52B38839_BB_4134, PH1236501_01DAu52C8F493_34_3FC2\n # PH1236501_01DAu52ECE16B_79_F727, PH1236501_01DAu53024DC6_F2_7EC9 \n # have extra bytes after sample, not an error anymore\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 751, 430)", "def __init__(self, input_stream, threads=DEFAULT_THREADS, level=9):\n super(Pigz, self).__init__(\n input_stream,\n program=\"pigz\",\n threads=threads,\n level=level,\n suffix=\".gz\",\n )", "def process_raw_data_aeris(xCar, xDate, xDir, xFilename, bFirst, gZIP, xOut, initialTimeBack, shift, maxSpeed='45',\n minSpeed='2'):\n import os\n import sys\n if sys.platform == 'win32':\n windows = True\n elif sys.platform != 'win32':\n windows = False\n\n import pandas as pd\n from datetime import datetime\n import os\n import gzip\n import numpy as np\n # import csv\n try:\n xMaxCarSpeed = float(maxSpeed) / 2.23694 # CONVERTED TO M/S (default is 45mph)\n xMinCarSpeed = float(minSpeed) / 2.23694 # CONVERTED TO M/S (default is 2mph)\n xMinCarSpeed = -10\n ########################################################################\n #### WE DON'T HAVE AN RSSI INPUT\n ### (SO THIS IS A PLACEHOLDER FOR SOME SORT OF QA/QC VARIABLE)\n ## xMinRSSI = 50 #if RSSI is below this we don't like it\n ##################################################################\n\n # reading in the data with specific headers\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54\n sHeader = \"Time 
Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude\"\n sHeader = 'Time Stamp,Inlet Number,P (mbars),T (degC),CH4 (ppm),H2O (ppm),C2H6 (ppb),R,C2/C1,Battery Charge (V),Power Input (mV),Current (mA),SOC (%),Latitude,Longitude,U (m/sec),V (m/sec),W (m/sec),T (degC),Dir (deg),Speed (m/sec),Compass (deg)'\n sOutHeader = \"DATE,TIME,SECONDS,NANOSECONDS,VELOCITY,U,V,W,BCH4,BRSSI,TCH4,TRSSI,PRESS_MBAR,INLET,TEMPC,CH4,H20,C2H6,R,C2C1,BATTV,POWMV,CURRMA,SOCPER,LAT,LONG\\n\"\n infoHeader = \"FILENAME\\n\"\n # somehow gZIP is indicating if it is the first file name (I think if it is 0 then it is the first file)\n if gZIP == 0:\n f = gzip.open(xDir + \"/\" + xFilename,\n 'r') # if in python 3, change this to \"r\" or just \"b\" can't remember but something about a bit not a string\n else:\n f = open(xDir + xFilename, 'r')\n\n infoHeader = \"FILENAME\\n\"\n\n # process - if first time on this car/date, then write header out\n headerNames = sHeader.split(',')\n xdat = str('20') + xFilename[11:17]\n fnOut = xOut + xCar + \"_\" + xdat + \"_dat.csv\" # set CSV output for raw data\n removeOut = xOut + xCar + \"_\" + xdat + \"_removed.csv\"\n fnLog = xOut + xCar + \"_\" + xdat + \".log\" # output for logfile\n infOut = xOut + xCar + \"_\" + xdat + \"_info.csv\"\n #\n\n dtime = open(xDir + xFilename).readlines().pop(2).split(',')[0]\n firstdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # firsttime = firstdate.strftime('%s.%f')\n firsttime = dt_to_epoch(firstdate)\n fnOutTemp = xOut + xCar + \"_\" + xdat + \"temp_dat.csv\" #\n\n if bFirst:\n fLog = open(fnLog, 'w')\n infOut = open(infOut, 'w')\n infOut.write(infoHeader)\n print(f\"fnLog:{fnOut}\")\n\n if not bFirst:\n fOut = open(fnOut, 'a')\n fLog = open(fnLog, 'a')\n infOut = open(infOut, 'a')\n\n fLog.write(\"Processing file: \" + str(xFilename) + \"\\n\")\n\n fOut = open(fnOutTemp, 'w')\n fOut.write(sOutHeader)\n\n # read all lines\n xCntObs = -1\n xCntGoodValues = 0\n for row in f:\n woo = row\n bGood = True\n if xCntObs != -1:\n lstS = row.split(\",\")\n if float(lstS[2]) < 20:\n bGood = False\n xCntObs += 1\n if xCntObs < 0:\n bGood = False\n xCntObs += 1\n if bGood:\n lstS = row.split(\",\")\n dtime = lstS[0]\n dateob = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n fdate = datetime(int(dtime[6:10]), int(dtime[0:2]), int(dtime[3:5]), int(dtime[11:13]),\n int(dtime[14:16]), int(dtime[17:19]), int(float(dtime[19:23]) * 1000000))\n # seconds = fdate.strftime('%s.%f')\n seconds = dt_to_epoch(fdate)\n\n def getNS(seconds):\n ns = str(float(seconds) * 1e-3)[11:]\n # str(pd.to_numeric(str(float(seconds) * 1e-3)[11:]) * 100000)[:9]\n return (str(ns).ljust(15, '0'))[:9]\n\n if len(lstS) > 6 and float(lstS[2]) > 20:\n csvWrite = str(dateob.strftime('%Y-%m-%d')) + ',' + str(\n dateob.strftime('%H:%M:%S')) + ',' + str(str(float(seconds) * 1e-3)[:10]) + ',' + getNS(\n seconds) + str(',')\n csvWrite += str(lstS[20]) + ',' + str(lstS[15]) + ',' + str(lstS[16]) + ',' + str(\n lstS[17]) + ',' + str(\n lstS[4]) + ',' + str('0') + ',' + str(lstS[4]) + ','\n csvWrite += str('0') + ',' + str(lstS[2]) + ',' + str(lstS[1]) + ',' + str(\n lstS[3]) + ',' + str(lstS[4]) + ',' + str(lstS[5]) + ',' + str(lstS[6]) + ','\n csvWrite += str(lstS[7]) + ',' + 
str(lstS[8]) + ',' + str(lstS[9]) + ',' + str(\n lstS[10]) + ',' + str(lstS[11]) + ',' + str(lstS[12]) + ',' + str(lstS[13]) + str(\n ',') + str(lstS[14]) + '\\n'\n fOut.write(csvWrite)\n xCntObs += 1\n fLog.write(\"Imported \" + str(xCntObs) + \" lines\" + \"\\n\")\n\n infOut.write(str(xFilename) + '\\n')\n fOut.close()\n # fLog.close()\n infOut.close()\n # print(xCar + \"\\t\" + xdat + \"\\t\" + fnOut[-22:] + \"\\t\" + str(xCntObs) + \"\\t\" + str(xCntGoodValues) + \"\\t\" + str(\n # gZIP))\n print(f\"{xCar} \\t {xdat} \\t {fnOut[-(17 + len(xCar)):]} \\t {xCntObs} \\t {xCntGoodValues} \\t {gZIP}\")\n\n wind_df = pd.read_csv(fnOutTemp)\n wind_df_not_null = wind_df.loc[wind_df['LAT'].notnull(),].reset_index(drop=True)\n wind_df_null = wind_df.loc[~wind_df['LAT'].notnull(),].reset_index(drop=True)\n if wind_df_null.shape[0] > 0:\n wind_df_null=wind_df_null.assign(Reason='GPS NA')\n\n del (wind_df)\n wind_df = wind_df_not_null.copy()\n\n radians = False\n wind_df['QUADRANT'] = wind_df.apply(lambda row: get_quadrant(row['U'], row['V']), axis=1)\n wind_df['secnan'] = wind_df.apply(lambda row: row['SECONDS'] + row['NANOSECONDS'] * 1e-9,\n axis=1) # + row['NANOSECONDS']*1e-9,axis=1)\n wind_df['prev_LAT'] = wind_df.LAT.shift(periods=1)\n wind_df['next_LAT'] = wind_df.LAT.shift(periods=-1)\n wind_df['prev_LONG'] = wind_df.LONG.shift(periods=1)\n wind_df['next_LONG'] = wind_df.LONG.shift(periods=-1)\n wind_df['prev_TIME'] = wind_df.secnan.shift(periods=1)\n wind_df['next_TIME'] = wind_df.secnan.shift(periods=-1)\n wind_df['distance'] = wind_df.apply(\n lambda row: haversine(row['prev_LAT'], row['prev_LONG'], row['next_LAT'], row['next_LONG']), axis=1)\n wind_df['bearing'] = wind_df.apply(\n lambda row: calc_bearing(row['prev_LAT'], row['next_LAT'], row['prev_LONG'], row['next_LONG'], radians),\n axis=1)\n wind_df['timediff'] = wind_df.apply(lambda row: row['next_TIME'] - row['prev_TIME'], axis=1)\n #wind_df['VELOCITY_calc'] = wind_df.apply(lambda row: row['distance']/row['timediff'],axis=1)\n wind_df['VELOCITY_calc'] = wind_df.apply(lambda row:calc_velocity(row['timediff'], row['distance']),axis=1)\n\n wind_df['VELOCITY'] = wind_df.apply(lambda x: (str(x.VELOCITY)), axis=1)\n wind_df['VELOCITY'] = wind_df.apply(lambda x: 0 if x.VELOCITY == 'XX.X' else x.VELOCITY, axis=1)\n wind_df['fVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[0]\n wind_df = wind_df.loc[wind_df['fVel'].notnull(),].reset_index(drop=True)\n wind_df['firstVel'] = wind_df.apply(lambda x: int(x['fVel']), axis=1)\n\n wind_df['sVel'] = wind_df[\"VELOCITY\"].str.split(\".\", n=1, expand=True)[1]\n wind_df = wind_df.loc[wind_df['sVel'].notnull(),].reset_index(drop=True)\n wind_df['secVel'] = wind_df.apply(lambda x: int(x['sVel']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstVel) + '.' 
+ str(x.secVel)), axis=1)\n wind_df2 = wind_df.drop(columns=['VELOCITY', 'secVel', 'sVel', 'fVel', 'firstVel'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'VELOCITY'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n ## CORRECT W WIND THING\n wind_df['W'] = wind_df.apply(lambda x: (str(x.W)), axis=1)\n wind_df['W'] = wind_df.apply(lambda x: 0 if x.W == 'XX.X' else x.W, axis=1)\n wind_df['fW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[0]\n # wind_df = wind_df.loc[wind_df['fW'].notnull(),].reset_index(drop=True)\n wind_df['firstW'] = wind_df.apply(lambda x: int(x['fW']), axis=1)\n wind_df['sW'] = wind_df[\"W\"].str.split(\".\", n=1, expand=True)[1]\n # wind_df = wind_df.loc[wind_df['sW'].notnull(),].reset_index(drop=True)\n wind_df['secW'] = wind_df.apply(lambda x: int(x['sW']), axis=1)\n wind_df['wloc'] = wind_df.apply(lambda x: float(str(x.firstW) + '.' + str(x.secW)), axis=1)\n wind_df2 = wind_df.drop(columns=['W', 'secW', 'sW', 'fW', 'firstW'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'wloc': 'W'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT U WIND THING\n wind_df['U'] = wind_df.apply(lambda x: (str(x.U)), axis=1)\n wind_df['U'] = wind_df.apply(lambda x: 0 if x.U == 'XX.X' else x.U, axis=1)\n wind_df['fU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstU'] = wind_df.apply(lambda x: int(x['fU']), axis=1)\n wind_df['sU'] = wind_df[\"U\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secU'] = wind_df.apply(lambda x: int(x['sU']), axis=1)\n wind_df['uloc'] = wind_df.apply(lambda x: float(str(x.firstU) + '.' + str(x.secU)), axis=1)\n wind_df2 = wind_df.drop(columns=['U', 'secU', 'sU', 'fU', 'firstU'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'uloc': 'U'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n ## CORRECT V WIND THING\n wind_df['V'] = wind_df.apply(lambda x: (str(x.V)), axis=1)\n wind_df['V'] = wind_df.apply(lambda x: 0 if x.V == 'XX.X' else x.V, axis=1)\n wind_df['fV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[0]\n wind_df['firstV'] = wind_df.apply(lambda x: int(x['fV']), axis=1)\n wind_df['sV'] = wind_df[\"V\"].str.split(\".\", n=1, expand=True)[1]\n wind_df['secV'] = wind_df.apply(lambda x: int(x['sV']), axis=1)\n wind_df['vloc'] = wind_df.apply(lambda x: float(str(x.firstV) + '.' 
+ str(x.secV)), axis=1)\n wind_df2 = wind_df.drop(columns=['V', 'secV', 'sV', 'fV', 'firstV'])\n del (wind_df)\n wind_df2 = wind_df2.rename(columns={'vloc': 'V'})\n wind_df = wind_df2.copy()\n del (wind_df2)\n\n wind_df['U_cor'] = wind_df.apply(lambda row: float(row['U']) + float(row['VELOCITY_calc']), axis=1)\n wind_df['horz_length'] = wind_df.apply(lambda row: np.sqrt(row['U_cor'] ** 2 + row['V'] ** 2), axis=1)\n wind_df['uncor_theta'] = wind_df.apply(\n lambda row: calc_bearing(row['U_cor'], row['V'], row['QUADRANT'], row['horz_length'], radians), axis=1)\n wind_df['adj_theta'] = wind_df.apply(lambda row: (row['uncor_theta'] + row['bearing']) % 360, axis=1)\n wind_df['totalWind'] = wind_df.apply(lambda row: np.sqrt(row['horz_length'] ** 2 + row['W'] ** 2), axis=1)\n wind_df['phi'] = wind_df.apply(lambda row: np.arctan(row['horz_length']), axis=1)\n\n wind_df['adj_v'] = wind_df.apply(lambda row: -row['horz_length'] * np.cos(row['adj_theta']), axis=1)\n wind_df['adj_u'] = wind_df.apply(lambda row: row['horz_length'] * np.sin(row['adj_theta']), axis=1)\n\n ## GO THROUGH WIND\n window_size = 30\n u_series = pd.Series(wind_df['adj_u'])\n u_windows = u_series.rolling(window_size)\n u_averages = pd.DataFrame(u_windows.mean())\n u_averages.columns = ['U_avg']\n u_averages['key'] = u_averages.index\n\n v_series = pd.Series(wind_df['adj_v'])\n v_windows = v_series.rolling(window_size)\n v_averages = pd.DataFrame(v_windows.mean())\n v_averages.columns = ['V_avg']\n v_averages['key'] = v_averages.index\n\n w_series = pd.Series(wind_df['W'])\n w_windows = w_series.rolling(window_size)\n w_averages = pd.DataFrame(w_windows.mean())\n w_averages.columns = ['W_avg']\n w_averages['key'] = w_averages.index\n\n vw_df = w_averages.set_index('key').join(v_averages.set_index('key'))\n vw_df['key'] = vw_df.index\n uvw_df = vw_df.set_index('key').join(u_averages.set_index('key'))\n uvw_df['key'] = uvw_df.index\n wind_df2 = wind_df.copy()\n wind_df2['key'] = wind_df2.index\n wind_df = uvw_df.set_index('key').join(wind_df2.set_index('key'))\n\n wind_df['r_avg'] = wind_df.apply(lambda row: np.sqrt(row['U_avg'] ** 2 + row['V_avg'] ** 2), axis=1)\n wind_df['theta_avg'] = wind_df.apply(lambda row: np.arctan(-row['U_avg'] / row['V_avg']), axis=1)\n\n wind_df['shift_CH4'] = wind_df.CH4.shift(periods=int(float(shift)))\n wind_df['raw_CH4'] = wind_df.apply(lambda row: row['BCH4'], axis=1)\n wind_df['BCH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['CH4'] = wind_df.loc[:, ['shift_CH4']]\n wind_df['TCH4'] = wind_df.loc[:, ['shift_CH4']]\n\n wind_df['shift_R'] = wind_df.R.shift(periods=int(float(shift)))\n wind_df['raw_R'] = wind_df.apply(lambda row: row['R'], axis=1)\n\n wind_df2 = wind_df[wind_df.CH4.notnull()]\n wind_df2_null = wind_df[~wind_df.CH4.notnull()]\n if wind_df2_null.shape[0] > 0:\n wind_df2_null=wind_df2_null.assign(Reason='GPS NA')\n nullCH4 = pd.concat([wind_df_null,wind_df2_null])\n\n\n wind_df2 = wind_df.copy()\n wind_df3 = wind_df2.drop(\n ['QUADRANT', 'secnan', 'prev_LAT', 'next_LAT', 'prev_LONG', 'next_LONG', 'prev_TIME', 'next_TIME',\n 'timediff', 'uncor_theta', 'CH4', 'R','VELOCITY'], axis=1)\n wind_df3['CH4'] = wind_df3.loc[:, 'shift_CH4']\n wind_df3['R'] = wind_df3.loc[:, 'shift_R']\n wind_df3 = wind_df3.drop(['shift_CH4', 'shift_R'], axis=1)\n # wind_df4 = wind_df3.loc[wind_df3.totalWind.notnull(),:]\n wind_df3['odometer'] = wind_df3.loc[:, 'distance'].cumsum()\n wind_df3a = wind_df3.copy().rename(columns = {'VELOCITY_calc':'VELOCITY'})\n wind_df4 = wind_df3a.loc[:,\n ['DATE', 'TIME', 'SECONDS', 
'NANOSECONDS', 'VELOCITY', 'U', 'V', 'W', 'BCH4', 'BRSSI', 'TCH4',\n 'TRSSI',\n 'PRESS_MBAR', 'INLET', 'TEMPC', 'CH4', 'H20', 'C2H6', 'R', 'C2C1', 'BATTV', 'POWMV', 'CURRMA',\n 'SOCPER',\n 'LAT', 'LONG', 'bearing', 'U_cor', 'horz_length', 'adj_theta', 'totalWind',\n 'phi', 'raw_CH4', 'raw_R', 'U_avg', 'V_avg', 'W_avg', 'r_avg', 'theta_avg', 'distance', 'odometer']]\n\n # wind_df7 = add_odometer(wind_df4,'LAT','LONG')\n\n # wind_df4 = wind_df7.copy()\n #wind_df5 = wind_df4.loc[wind_df4.VELOCITY > xMinCarSpeed, :]\n wind_df5 = wind_df4.loc[wind_df4.VELOCITY > -1, :]\n\n wrongSpeed = wind_df4.loc[wind_df4.VELOCITY <= xMinCarSpeed,:]\n wrongSpeed=wrongSpeed.assign(Reason='velocity too slow')\n\n #wind_df6 = wind_df5.loc[wind_df5.VELOCITY < xMaxCarSpeed, :]\n wind_df6 = wind_df5.loc[wind_df5.VELOCITY < 1000, :]\n\n wrongSpeed2 = wind_df5.loc[wind_df5.VELOCITY >= xMaxCarSpeed, :]\n wrongSpeed2 = wrongSpeed2.assign(Reason='velocity too fast')\n\n wrongSpeeds = pd.concat([wrongSpeed,wrongSpeed2])\n\n\n notGood = pd.concat([wrongSpeeds,nullCH4])\n # wind_df6 = wind_df6a.loc[wind_df6a.R > .6999, :]\n\n del (wind_df4)\n wind_df4 = wind_df6.copy().drop_duplicates()\n wind_df5 = wind_df4.loc[wind_df4.CH4.notnull(), :]\n\n nullCH4 = wind_df4.loc[~wind_df4.CH4.notnull(), :]\n if nullCH4.shape[0] > 0:\n nullCH4 = nullCH4.assign(Reason='CH4 NA')\n removedDF = pd.concat([notGood,nullCH4])\n if nullCH4.shape[0]==0:\n removedDF = notGood\n wind_df4 = wind_df5.copy()\n\n ## if you want to filter out high temperatures\n #wind_df4 = wind_df5.loc[wind_df5.TEMPC < 95, :].reset_index(drop=True)\n\n fLog.write(\"Usable lines - \" + str(wind_df4.shape[0]) + \".\" + \"\\n\")\n fLog.close()\n\n if bFirst:\n wind_df4.to_csv(fnOut, index=False)\n removedDF.to_csv(removeOut,index=False)\n elif not bFirst:\n norm = pd.read_csv(fnOut)\n pd.concat([norm, wind_df4]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(fnOut, index=False)\n removed = pd.read_csv(removeOut)\n pd.concat([removed, removedDF]).sort_values(by='SECONDS').reset_index(drop=True).to_csv(removeOut, index=False)\n\n os.remove(fnOutTemp)\n return True\n except ValueError:\n return False", "def __init__(self,filePath,headerSymbols=['@','+']):\n if filePath.endswith('.gz'):\n self._file = gzip.open(filePath)\n else:\n self._file = open(filePath, 'rU')\n self._currentLineNumber = 0\n self._hdSyms = headerSymbols", "def __init__(self,filePath,headerSymbols=['@','+']):\n if filePath.endswith('.gz'):\n self._file = gzip.open(filePath)\n else:\n self._file = open(filePath, 'rU')\n self._currentLineNumber = 0\n self._hdSyms = headerSymbols", "def refreshDataSizeCrc(self):\n if isinstance(self,InstallerArchive):\n archiveRoot = GPath(self.archive).sroot\n else:\n archiveRoot = self.archive\n reReadMe = self.reReadMe\n docExts = self.docExts\n docDirs = self.docDirs\n dataDirsPlus = self.dataDirsPlus\n dataDirsMinus = self.dataDirsMinus\n skipExts = self.skipExts\n bethFiles = bush.bethDataFiles\n packageFiles = set(('package.txt','package.jpg'))\n unSize = 0\n espmNots = self.espmNots\n skipVoices = self.skipVoices\n off_local = self.off_local\n if espmNots and not skipVoices:\n skipEspmVoices = set(x.cs for x in espmNots)\n else:\n skipEspmVoices = None\n skipDistantLOD = settings['bash.installers.skipDistantLOD']\n hasExtraData = self.hasExtraData\n type = self.type\n if type == 2:\n allSubs = set(self.subNames[1:])\n activeSubs = set(x for x,y in zip(self.subNames[1:],self.subActives[1:]) if y)\n #--Init to empty\n self.readMe = self.packageDoc = 
self.packagePic = None\n for attr in ('skipExtFiles','skipDirFiles','espms'):\n object.__getattribute__(self,attr).clear()\n data_sizeCrc = {}\n skipExtFiles = self.skipExtFiles\n skipDirFiles = self.skipDirFiles\n espms = self.espms\n dest_src = {}\n #--Bad archive?\n if type not in (1,2): return dest_src\n #--Scan over fileSizeCrcs\n for full,size,crc in self.fileSizeCrcs:\n file = full #--Default\n if type == 2: #--Complex archive\n subFile = full.split('\\\\',1)\n if len(subFile) == 2:\n sub,file = subFile\n if sub not in activeSubs:\n if sub not in allSubs:\n skipDirFiles.add(file)\n continue\n rootPos = file.find('\\\\')\n extPos = file.rfind('.')\n fileLower = file.lower()\n rootLower = (rootPos > 0 and fileLower[:rootPos]) or ''\n fileExt = (extPos > 0 and fileLower[extPos:]) or ''\n #--Skip file?\n if (rootLower == 'omod conversion data' or \n fileLower[-9:] == 'thumbs.db' or fileLower[-11:] == 'desktop.ini'):\n continue #--Silent skip\n elif skipDistantLOD and fileLower[:10] == 'distantlod':\n continue\n elif skipVoices and fileLower[:11] == 'sound\\\\voice':\n continue\n elif file in bethFiles:\n skipDirFiles.add(full)\n continue\n elif not hasExtraData and rootLower and rootLower not in dataDirsPlus:\n skipDirFiles.add(full)\n continue\n elif hasExtraData and rootLower and rootLower in dataDirsMinus:\n skipDirFiles.add(full)\n continue\n elif fileExt in skipExts:\n skipExtFiles.add(full)\n continue\n #--Remap (and/or skip)\n dest = file #--Default. May be remapped below.\n #--Esps\n if not rootLower and reModExt.match(fileExt):\n pFile = pDest = GPath(file)\n if pFile in off_local:\n pDest = off_local[pFile]\n dest = pDest.s\n espms.add(pDest)\n if pDest in espmNots: continue\n #--Esp related voices (Oblivion)\n elif skipEspmVoices and fileLower[:12] == 'sound\\\\voice\\\\':\n farPos = file.find('\\\\',12)\n if farPos > 12 and fileLower[12:farPos] in skipEspmVoices:\n continue\n #--Docs\n elif rootLower in docDirs:\n dest = 'Docs\\\\'+file[rootPos+1:]\n elif not rootLower:\n maReadMe = reReadMe.match(file)\n if file.lower() == 'masterlist.txt':\n pass\n elif maReadMe:\n if not (maReadMe.group(1) or maReadMe.group(3)):\n dest = 'Docs\\\\%s%s' % (archiveRoot,fileExt)\n else:\n dest = 'Docs\\\\'+file\n self.readMe = dest\n elif fileLower == 'package.txt':\n dest = self.packageDoc = 'Docs\\\\'+archiveRoot+'.package.txt'\n elif fileLower == 'package.jpg':\n dest = self.packagePic = 'Docs\\\\'+archiveRoot+'.package.jpg'\n elif fileExt in docExts:\n dest = 'Docs\\\\'+file\n #--Save\n key = GPath(dest)\n data_sizeCrc[key] = (size,crc)\n dest_src[key] = full\n unSize += size\n self.unSize = unSize\n (self.data_sizeCrc,old_sizeCrc) = (data_sizeCrc,self.data_sizeCrc)\n #--Update dirty?\n if self.isActive and data_sizeCrc != old_sizeCrc:\n dirty_sizeCrc = self.dirty_sizeCrc\n for file,sizeCrc in old_sizeCrc.iteritems():\n if file not in dirty_sizeCrc and sizeCrc != data_sizeCrc.get(file):\n dirty_sizeCrc[file] = sizeCrc\n #--Done (return dest_src for install operation)\n return dest_src", "def read(files, save):\n\t# NOTE all soundings are size obs long, they must be filled in with zeros for this data format...\n\t# create the HDF5 document\n\tdoc = h5(save)\n\tsize = 450 # this hopefully exceeds the size of the arrays # CPIN Files are much shorter...\n\tdoc.create(pres=size, temp=size, dewpt=size, rh=size, r=size, u=size, v=size, z=size, lat=1, lon=1, theta=size, thte=size,\n\t\twspd=size, wdir=size, gamma=size, stab=size, N=size, rich=size, thtdef=size, cpin=size)\n\t# those last two do 
not have to be included...\n\t# Z=geopotenital height\n\n\t# now read the files!\n\tfor f in sorted(files):\n\t\tfname = f.split('/')[-1]\n\t\t# if 'smth' not in fname and NCAR not in fname: continue\n\t\tl.info('reading ' + fname)\n\t\t# launch time comes from line 2 of the file, the last element\n\t\tdf = open(f, 'r')\n\t\ttxt = df.read(2000).split('\\n') # way more than we need\n\t\tdf.close()\n\t\tlatln = txt[0].split() # keys 1,2 will be what we want\n\t\ttry:\n\t\t\ttm = s2t(txt[1].split()[-1] + 'UTC', '%Y%m%d%H%M%Z')\n\t\texcept:\n\t\t\t# drat.\n\t\t\tprint txt.split('\\n')[1]\n\t\t\tcontinue\n\t\ttry:\n\t\t\tif 'cpin' in fname:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich, thtdef, cpin = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\t\telse:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\texcept:\n\t\t\tl.warning('This file could not be read')\n\t\t\tcontinue\n\n\t\t# and append this data! I will trust the time seconds, instead of recomputing the time\n\t\t# but, before that, we have to make them all the same size - size long\n\t\tnl = np.zeros(size - t.shape[0]) - 999.00 # -999 array to fluff the end\n\t\tp = np.concatenate((p, nl))\n\t\tt = np.concatenate((t, nl))\n\t\ttd = np.concatenate((td, nl))\n\t\trh = np.concatenate((rh, nl))\n\t\tr = np.concatenate((r, nl))\n\t\ttv = np.concatenate((tv, nl))\n\t\ttht = np.concatenate((tht, nl))\n\t\tthte = np.concatenate((thte, nl))\n\t\tws = np.concatenate((ws, nl))\n\t\twd = np.concatenate((wd, nl))\n\t\tgamma = np.concatenate((gamma, nl))\n\t\tstab = np.concatenate((stab, nl))\n\t\tN = np.concatenate((N, nl))\n\t\trich = np.concatenate((rich, nl))\n\t\tu = np.concatenate((u, nl))\n\t\tv = np.concatenate((v, nl))\n\t\tz = np.concatenate((z, nl))\n\t\tif 'cpin' in fname:\n\t\t\tcpin = np.concatenate((cpin, nl))\n\t\t\tthtdef = np.concatenate((thtdef, nl))\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich, cpin=cpin, thtdef=thtdef)\n\t\telse:\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich)\n\tdoc.close()", "def load_data(self) -> None:", "def __init__(self, path):\n self.side_len = None\n self.path = path\n self.src = rasterio.open(self.path)\n self.resolution_scaler = 2 / self.src.res[0]\n self.pad_val = 255 # for empty tiles on the edges of the frame", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / 
self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def __init__(self, fname):\n f = zopen(fname, \"rt\")\n\n # skip header lines\n for i in range(2):\n f.readline()\n\n # number of atoms included in the file followed by the position of the origin of the volumetric data\n line = f.readline().split()\n self.natoms = int(line[0])\n self.origin = np.array(np.array(list(map(float, line[1:]))))\n\n # The next three lines give the number of voxels along each axis (x, y, z) followed by the axis vector.\n line = f.readline().split()\n self.NX = int(line[0])\n self.X = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n line = f.readline().split()\n self.NY = int(line[0])\n self.Y = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n line = f.readline().split()\n self.NZ = int(line[0])\n self.Z = np.array([bohr_to_angstrom * float(l) for l in line[1:]])\n\n self.voxelVolume = abs(np.dot(np.cross(self.X, self.Y), self.Z))\n self.volume = abs(np.dot(np.cross(self.X.dot(self.NZ), self.Y.dot(self.NY)), self.Z.dot(self.NZ)))\n\n # The last section in the header is one line for each atom consisting of 5 numbers,\n # the first is the atom number, second is charge, the last three are the x,y,z coordinates of the atom center.\n self.sites = []\n for i in range(self.natoms):\n line = f.readline().split()\n self.sites.append(Site(line[0], np.multiply(bohr_to_angstrom, list(map(float, line[2:])))))\n\n self.structure = Structure(\n lattice=[self.X * self.NX, self.Y * self.NY, self.Z * self.NZ],\n species=[s.specie for s in self.sites],\n coords=[s.coords for s in self.sites],\n coords_are_cartesian=True,\n )\n\n # Volumetric data\n self.data = np.zeros((self.NX, self.NY, self.NZ))\n i = 0\n for s in f:\n for v in s.split():\n self.data[\n int(i / (self.NY * self.NZ)),\n int((i / self.NZ) % self.NY),\n int(i % self.NZ),\n ] = float(v)\n i += 1", "def read(self, FN, multiplier=None):\n if FN is None:\n raise Exception('File is not defined')\n elif FN.endswith('.dx') or FN.endswith('.dx.gz'):\n data = self._read_dx(FN)\n elif FN.endswith('.nc'):\n data = self._read_nc(FN)\n else:\n raise Exception('File type not supported')\n if multiplier is not None:\n data['origin'] = multiplier * data['origin']\n data['spacing'] = multiplier * data['spacing']\n return data" ]
[ "0.6133056", "0.604979", "0.5808275", "0.57805765", "0.56968015", "0.55559903", "0.55156744", "0.5501627", "0.5496729", "0.54792094", "0.5473201", "0.54323983", "0.5407194", "0.5392547", "0.53821784", "0.5369768", "0.53655523", "0.5352017", "0.5350997", "0.5338158", "0.5336101", "0.53350955", "0.5324971", "0.5303069", "0.5297909", "0.52896005", "0.52772117", "0.52671933", "0.5261637", "0.52463406", "0.52450216", "0.5242225", "0.5231774", "0.5227676", "0.5225666", "0.5204984", "0.51991343", "0.5190511", "0.5187786", "0.5186243", "0.5186088", "0.5184608", "0.51670307", "0.5161795", "0.51586044", "0.51490676", "0.5147201", "0.51419055", "0.51361716", "0.51324296", "0.51262116", "0.51245624", "0.51165444", "0.5113928", "0.5110921", "0.51058733", "0.5105668", "0.51033306", "0.51011264", "0.50912756", "0.50908726", "0.50894004", "0.5088857", "0.5086581", "0.50802773", "0.5072437", "0.50632775", "0.50614023", "0.5058666", "0.5049813", "0.5049813", "0.5047804", "0.50423026", "0.5039913", "0.50359094", "0.502582", "0.50243616", "0.5021007", "0.5017556", "0.5012219", "0.50085443", "0.5001088", "0.49959034", "0.49942163", "0.49935493", "0.4993132", "0.49884018", "0.49875066", "0.4987498", "0.49766028", "0.4970928", "0.49709135", "0.49709135", "0.49691013", "0.49663976", "0.49637827", "0.49627048", "0.4962687", "0.49625567", "0.4961342" ]
0.5056924
69
get the scale factor required to put the image on the reference zero point
def _get_scale_from_magzp(self, magzp):
    scale = 10.0**( 0.4*(self['magzp_ref']-magzp) )
    return scale
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def scale(self):\n return self._scale", "def auto_scale_factor(self):\r\n return self.gref.auto_scale_factor", "def scale(self):", "def pixel_scale(self):\n return np.abs(float(self.header[\"CDELT1\"]))", "def scale(self):\n return self._gev_bijector.scale", "def GetScale(self):\n ...", "def scale(self):\n return self.distribution.scale", "def imageScale(scale):\n\t\treturn max(1, int(scale * (InterfaceTools.getCanvasSize()[0] / height)))", "def scale(self):\n return self._a", "def getScale(self):\n return self.factor**self.turnOn", "def get_scale_factor(rec, stack):\n \n rec_pixel_size = get_pixel_size_rec(rec)\n stack_pixel_size = get_pixel_size_stack(stack)\n \n return rec_pixel_size / stack_pixel_size", "def scaling(self):\n return self.__scaling", "def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)", "def img_scale(self):\n return min(400, abs(self.size))", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def prescaler(self) -> int:", "def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n if img.shape[-1] != 1:\r\n img = np.sqrt(np.sum(img**2, axis=-1, keepdims=True))\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img - min)/(max - min)\r\n\r\n return img.astype(np.float32)", "def scale(self) -> Tuple[float, float]:\n return self._scale", "def getScale(self):\n \n dag_node = OpenMaya.MFnDagNode(self.thisObj)\n transform_node = OpenMaya.MFnTransform(dag_node.parent( 0 ))\n \n util = OpenMaya.MScriptUtil()\n util.createFromDouble(0.0, 0.0, 0.0)\n pointeur = util.asDoublePtr()\n transform_node.getScale(pointeur)\n \n sx = util.getDoubleArrayItem(pointeur, 0)\n sy = util.getDoubleArrayItem(pointeur, 1)\n sz = util.getDoubleArrayItem(pointeur, 2)\n\n return sx, sy, sz", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def scale0to1(img):\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))", "def getScale(self):\n return _libsbml.Unit_getScale(self)", "def loss_scale(self):\n return self._loss_scale", "def scale(image, maxval=1024):\n image += maxval # minimum value is now 0\n image /= maxval*2\n\n return(image)", "def compute_display_scale_factor(self, decimated_image):\n\n # TODO: division may not work as expected in Python 2 (int versus float)\n # what is the intent here?\n decimated_image_nx = decimated_image.shape[1]\n decimated_image_ny = decimated_image.shape[0]\n scale_factor_1 = self.canvas_nx/decimated_image_nx\n scale_factor_2 = 
self.canvas_ny/decimated_image_ny\n scale_factor = min(scale_factor_1, scale_factor_2)\n return scale_factor", "def scaling(mat, factor):\n\treturn mat / (mat + factor)", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")", "def scale(val, src, dst):\n try:\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]\n except ZeroDivisionError:\n return 0.0", "def getnscale(self):\n return self.nscale", "def _scale_to_zero_one(img):\n if img.dtype == np.uint8:\n img = img.astype(np.float32)\n return np.multiply(img, 1.0 / 255.0)\n else:\n print(\"image values already seem to be float\")\n return img", "def _check_scale_factor(\n spatial_data: Optional[Mapping],\n img_key: Optional[str],\n scale_factor: Optional[float],\n) -> float:\n if scale_factor is not None:\n return scale_factor\n elif spatial_data is not None and img_key is not None:\n return spatial_data[\"scalefactors\"][f\"tissue_{img_key}_scalef\"]\n else:\n return 1.0", "def set_scale_factors_to_output_size(self):\n # Compute the scale_factor using rounded scaled image size.\n height = tf.shape(self._image)[0]\n width = tf.shape(self._image)[1]\n max_image_size = tf.to_float(tf.maximum(height, width))\n image_scale = tf.to_float(self._output_size) / max_image_size\n scaled_height = tf.to_int32(tf.to_float(height) * image_scale)\n scaled_width = tf.to_int32(tf.to_float(width) * image_scale)\n self._image_scale = image_scale\n self._scaled_height = scaled_height\n self._scaled_width = scaled_width", "def overlay_scale(self):\n return self._overlay_scale", "def scaling(self):\n return self.stacked._box_scaling[1]", "def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale", "def scale_factor(self, z = 0.):\n return 1./(1.+z)", "def plate_scale(self):\n\n return 206265 * uu.arcsec / (self.diameter.to('mm') * self.f)", "def scale(val, src, dst):\r\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def _get_target_scale(self, im_size_min, im_size_max, target_size, max_size):\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale", "def max_scale_image(self):\n maximum = np.argmax(self.transform, 0)\n return self.scale_array[maximum] * (self.support.sum(0) > 0)", "def scale_root(self) -> int:\r\n ...", "def get_zoom(self) -> float:\n transform = self.transform()\n cur_scale = (transform.m11(), transform.m22())\n return float(f\"{cur_scale[0] - 1.0:0.2f}\")", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = 
image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r", "def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter", "def normScale( x, y ):\n if x == 0 and y == 0:\n return 0\n else:\n return 1.0 / pow( x*x + y*y, 0.5 )", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def _get_scaling(root):\n dpi = root.winfo_fpixels(\"1i\")\n scaling = dpi / 72.0\n logger.debug(\"dpi: %s, scaling: %s'\", dpi, scaling)\n return scaling", "def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]", "def colorscale(self):\n return self['colorscale']", "def get_scale_parameter(self):\n\n if self.scale_parameter == 0.0:\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\n return self.scale_parameter\n else:\n return self.scale_parameter", "def get_apply_scale(self, applyScaleFactor, scale_quality = 1.0):\n v = self.scale * self.scale_quality * scale_quality\n if applyScaleFactor:\n v *= self.scale_factor\n return v", "def scaling_factor(self):\n bin_scale = self.spabins * self.spebins\n return bin_scale * self.int_time", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def scale(val, src, dst):\n return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]", "def normalize_0d(x, old_scale=(0, 1, 1024), new_scale=(0, 1, 1024)):\n old_delta = old_scale[1] - old_scale[0]\n new_delta = new_scale[1] - new_scale[0]\n old_min = old_scale[0]\n new_min = new_scale[0]\n return (x - old_min) * (new_delta / old_delta) + new_min", "def fraction_full_scale(self):\n return self._fraction_full_scale", "def normalize(img):\r\n min = img.min()\r\n max = img.max()\r\n x = 2.0 * (img - min) / (max - min) - 1.0\r\n return x", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def get_loss_scale(self):\n return self._loss_scale", "def reversescale(self):\n return self['reversescale']", "def colorscale(self):\n return self[\"colorscale\"]", "def _pixel_scale(self, width=None, height=None, scale=None):\n if numpy.count_nonzero([width is not None, height is not None, scale is not None]) > 1:\n raise ValueError(\"Specify only one of width, height, or scale.\")\n if width is not None:\n scale = width / self._width\n elif height is not None:\n scale = height / self._height\n elif scale is None:\n scale = 1.0\n return scale", "def calculate_image_scale(source_width, source_height, target_width, target_height):\n if source_width == target_width and source_height == target_height:\n return 1.0\n\n source_ratio = source_width / source_height\n target_ratio = target_width / target_height\n\n if target_ratio < source_ratio:\n scale = target_width / source_width\n else:\n scale = target_height / source_height\n\n return scale", "def rescale(tx):\n mins = np.amin(tx, axis=0)\n maxs = np.amax(tx, axis=0)\n txscale = (tx - mins) / (maxs - mins)\n 
return txscale", "def downScaleResolution(kv, factor=10):\n sub_img_name = kv[0]\n sub_image = kv[1]\n img_dimension = len(sub_image)\n big_image = sub_image\n Nbig = img_dimension\n Nsmall = Nbig//factor\n small_image = big_image.reshape([Nsmall, Nbig // Nsmall, Nsmall, Nbig // Nsmall]).mean(3).mean(1)\n return (sub_img_name,small_image)", "def rolloff_scale(self):\n return self._rolloffscale", "def scale4x(self) -> 'BaseImage':\n return self.scale2x().scale2x()", "def scale(self, sf):\n self.scale(sf, sf)", "def rescale_intrinsic(self):\n # scale focal length and principal points wrt image resizeing\n if self.downscale > 1:\n self.K = self.K_orig.copy()\n self.K[0, 0] /= float(self.downscale)\n self.K[1, 1] /= float(self.downscale)\n self.K[0, 2] /= float(self.downscale)\n self.K[1, 2] /= float(self.downscale)\n self.intrinsic = self.K\n else:\n self.K = self.intrinsic = self.K_orig.copy()", "def reversescale(self):\n return self[\"reversescale\"]", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def scale_parameter(self):\n return self._scale_parameter", "def parallel_scale(self):\n return self.camera.parallel_scale", "def reScaleLandsat(self,img):\n \n\t\tthermalBand = ee.List(['thermal'])\n\t\tthermal = ee.Image(img).select(thermalBand).multiply(10)\n \n\t\totherBands = ee.Image(img).bandNames().removeAll(thermalBand)\n\t\tscaled = ee.Image(img).select(otherBands).divide(0.0001)\n \n\t\timage = ee.Image(scaled.addBands(thermal)).int16()\n \n\t\treturn image.copyProperties(img)", "def get_scale_freq():\n return sf / 2 / (num_freq-1)", "def scale_value(self):\n return self._scale_value[2]", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def normalize(x):\n a = 0\n b = 1\n scale_min = 0\n scale_max = 255\n return a + ( ( (x - scale_min)*(b - a) )/( scale_max - scale_min ) )", "def doppler_scale(self):\n return self._dopplerscale", "def zoom_to_size(self, *p):\n\t\tif self.image is None or self.allocation is None:\n\t\t\treturn\n\t\tif __debug__: print self.allocation.width, self.image.get_width()\n\t\tif __debug__: print self.allocation.width, self.image.get_width(), self.allocation.width/self.image.get_width()\n\t\tz = min(\n\t\t\tself.allocation.width/self.image.get_width(),\n\t\t\tself.allocation.height/self.image.get_height()\n\t\t\t)\n\t\tif __debug__: print \"zoom_to_size\", \"z=\", z\n\t\tself.zoom = z", "def normalize(image):\r\n return image / 127.5 - 1.", "def _scale(self, image):\n\n if image.GetWidth() != self._width or image.GetHeight()!= self._height:\n image.Rescale(self._width, self._height)\n \n return image", "def scale(img, scale):\n return resize(img, x_scale=scale, y_scale=scale)", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def scale(self, factor):\n self.b = factor * self.b", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")" ]
[ "0.7939209", "0.75170743", "0.7390683", "0.7387904", "0.72454107", "0.72146994", "0.71663", "0.7162196", "0.7084686", "0.70655787", "0.6965191", "0.69290656", "0.6879681", "0.6862533", "0.684772", "0.6845276", "0.68248934", "0.6792253", "0.6792253", "0.6774864", "0.67718226", "0.676984", "0.67364895", "0.67214656", "0.6713993", "0.67079145", "0.6688022", "0.66800725", "0.66579276", "0.66240764", "0.65998054", "0.659218", "0.65889263", "0.65869176", "0.6575881", "0.6552234", "0.6532833", "0.6519408", "0.65007275", "0.6485796", "0.6483468", "0.6480529", "0.64762086", "0.64694506", "0.6453664", "0.6444191", "0.64410484", "0.64294165", "0.6421194", "0.6418623", "0.6409128", "0.64056206", "0.6392704", "0.6368047", "0.6361764", "0.63560283", "0.6351045", "0.6337589", "0.6320056", "0.630639", "0.6291833", "0.62879133", "0.62771934", "0.62770456", "0.6276428", "0.6276428", "0.6260383", "0.6246133", "0.62457395", "0.62361556", "0.6233152", "0.62308776", "0.62209564", "0.6215892", "0.61952823", "0.6179802", "0.617541", "0.61521715", "0.6146279", "0.6140903", "0.6135144", "0.61258584", "0.61253405", "0.61248094", "0.6114964", "0.61073434", "0.61030245", "0.60966396", "0.6093608", "0.60744023", "0.6065569", "0.6062137", "0.60616595", "0.6043745", "0.60377365", "0.603631", "0.6020619", "0.60193336", "0.6012952", "0.60101575" ]
0.65182626
38
run fpack on the file
def _compress_meds_file(self, ucfilename, fzfilename):
    from os.path import basename
    tup=(basename(ucfilename),basename(fzfilename))
    print('compressing file: %s -> %s' % tup)
    tpath=files.expandpath(fzfilename)
    if os.path.exists(tpath):
        os.remove(tpath)
    tmpdir = os.path.dirname(ucfilename)
    with StagedOutFile(fzfilename,tmpdir=tmpdir) as sf:
        cmd = self['fpack_command']
        cmd = cmd.format(fname=ucfilename)
        ret=os.system(cmd)
        if ret != 0:
            raise RuntimeError("failed to compress file")
    print('output is in:',fzfilename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fpack (filename):\n\n try:\n\n # fits check if extension is .fits and not an LDAC fits file\n if filename.split('.')[-1] == 'fits' and '_ldac.fits' not in filename:\n header = read_hdulist(filename, get_data=False, get_header=True,\n ext_name_indices=0)\n\n # check if it is an image\n if int(header['NAXIS'])==2:\n # determine if integer or float image\n if int(header['BITPIX']) > 0:\n cmd = ['fpack', '-D', '-Y', '-v', filename]\n else:\n if 'Scorr' in filename or 'limmag' in filename:\n quant = 2\n elif 'Fpsf' in filename:\n quant = 4\n else:\n quant = 16\n\n cmd = ['fpack', '-q', str(quant), '-D', '-Y', '-v', filename]\n\n\n # if output fpacked file already exists, delete it\n filename_packed = '{}.fz'.format(filename)\n if isfile(filename_packed):\n #os.remove(filename_packed)\n remove_files([filename_packed])\n log.warning ('fpacking over already existing file {}'\n .format(filename_packed))\n\n subprocess.run(cmd)\n filename = filename_packed\n\n\n except Exception as e:\n #log.exception (traceback.format_exc())\n log.exception ('exception was raised in fpacking of image {}: {}'\n .format(filename,e))\n\n\n return filename", "def openAndPack(filename):\n inputfile = open(filename, 'rb')\n return inputfile.read()", "def pack():\n PackCommandExecutor().pack()", "def _pack_ex(file, names, cwd, implementor=None):\n assert isdir(cwd)\n if exists(file):\n console.rm(file)\n if not implementor: implementor = GzipTarredFile\n \n with console.cd(cwd):\n relnames = [relpath(name, cwd) for name in names]\n implementor.pack(relnames, file)\n return file", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def pack_contents(file, d, implementor=None):\n d = realpath(d) # avoid symlink confusion in pack_ex\n return _pack_ex(file, os.listdir(d), d, implementor)", "def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk = hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n \n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. 
then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)", "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None", "def pack_single(file, path, implementor=None):\n path = realpath(path) # avoid symlink confusion in pack_ex\n return _pack_ex(file, [path], dirname(path), implementor)", "def svn_fs_pack(*args):\r\n return _fs.svn_fs_pack(*args)", "def post_package():\n package_file = BytesIO()\n with tarfile.open(mode='w', fileobj=package_file) as tar:\n # metadata\n meta_content = b'encoding: utf-8\\npost: post.md'\n file_info = tarfile.TarInfo('package.yml')\n file_info.size = len(meta_content)\n tar.addfile(file_info, BytesIO(meta_content))\n\n # post\n post_content = b'''---\ntitle: A title\ntopic: A topic\n---\n\n[summary]\nA summary\n\nA paragraph\n'''\n file_info = tarfile.TarInfo('post.md')\n file_info.size = len(post_content)\n tar.addfile(file_info, BytesIO(post_content))\n package_file.seek(0)\n\n return package_file", "def do_package(package):\n\tn_ucr = extFile(package, 'univention-config-registry')\n\tif not os.path.exists(n_ucr):\n\t\treturn\n\n\tf_ucr = open(n_ucr, 'r')\n\n\tfor item in univention.config_registry.parseRfc822(f_ucr.read()):\n\t\ttyp = item['Type'][0]\n\t\tif typ == 'file':\n\t\t\tf = item['File'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'subfile':\n\t\t\tf = item['Subfile'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\telif typ == 'multifile':\n\t\t\tf = item['Multifile'][0]\n\t\t\tif os.path.exists(f):\n\t\t\t\tdoIt('install', '-d', destDir(f, package))\n\t\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package))\n\t\t\tfor key in ('Preinst', 'Postinst'):\n\t\t\t\tif key in item:\n\t\t\t\t\tmod = item[key][0]\n\t\t\t\t\tdoIt('install', '-d', destDir(mod, package, 'modules'))\n\t\t\t\t\tdoIt('cp', '-a', srcPath(mod), destPath(mod, package, 'modules'))\n\t\telif typ == 'script':\n\t\t\tf = item['Script'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'scripts'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'scripts'))\n\t\t\tdoIt('chmod', 'a+x', destPath(f, package, 'scripts'))\n\t\telif typ == 'module':\n\t\t\tf = item['Module'][0]\n\t\t\tdoIt('install', '-d', destDir(f, package, 'modules'))\n\t\t\tdoIt('cp', '-a', srcPath(f), destPath(f, package, 'modules'))\n\t\telse:\n\t\t\tprint >>sys.stderr, 'Unknown type: %s' % typ\n\t\t\treturn\n\n\tf_ucr.close()\n\n\tdoIt('install', '-d', 
destDir('', package, 'info'))\n\tdoIt('install', '-m644', n_ucr, destPath(package+'.info', package, 'info'))\n\tmapping_file = extFile( package, 'univention-config-registry-mapping')\n\tif os.path.exists(mapping_file):\n\t\tdoIt('install', '-d', destDir('', package, 'mapping'))\n\t\tdoIt('install', '-m644', mapping_file, destPath(package+'.univention-config-registry-mapping', package, 'mapping'))\n\n\tdata = {\n\t\t\t'pkg': quote(package),\n\t\t\t'info': quote(\"/etc/univention/templates/info/%s.info\" % package),\n\t\t\t'removed': quote(\"/etc/univention/templates/removed/%s.info\" % package),\n\t\t\t}\n\n\tf_preinst = open(extFile(package, 'preinst.debhelper'), 'a')\n\tf_preinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_preinst.write('if [ \"$1\" = \"install\" ] ; then\\n')\n\tf_preinst.write(' [ -e %(removed)s ] && [ ! -e %(info)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_preinst.write('fi\\n')\n\tf_preinst.write('# End automatically added section\\n')\n\tf_preinst.close()\n\n\tf_postinst = open(extFile(package, 'postinst.debhelper'), 'a')\n\tf_postinst.write('# Automatically added by univention-install-config-registry\\n')\n\tf_postinst.write('if [ \"$1\" = \"abort-remove\" ]; then\\n')\n\tf_postinst.write(' [ -e %(removed)s ] && mv %(removed)s %(info)s || true\\n' % data)\n\tf_postinst.write('fi\\n')\n\tf_postinst.write('[ -x /usr/sbin/univention-config-registry ] && univention-config-registry register %(pkg)s || true\\n' % data)\n\tf_postinst.write('# End automatically added section\\n')\n\tf_postinst.close()\n\n\tf_prerm = open(extFile(package, 'prerm.debhelper'), 'a')\n\tf_prerm.write('# Automatically added by univention-install-config-registry\\n')\n\tf_prerm.write('if [ \"$1\" = \"remove\" ] && [ -e %(info)s ] ; then\\n' % data)\n\tf_prerm.write(' [ -x /usr/sbin/univention-config-registry ] && univention-config-registry unregister %(pkg)s || true\\n' % data)\n\tf_prerm.write(' mv %(info)s %(removed)s || true\\n' % data)\n\tf_prerm.write('fi\\n')\n\tf_prerm.write('# End automatically added section\\n')\n\tf_prerm.close()\n\n\tdoIt('perl', '-e', 'use Debian::Debhelper::Dh_Lib;addsubstvar(\"%s\", \"misc:Depends\", \"univention-config (>= 7.0.25)\");' % package)", "def pack(file_path, extension):\n package_dir = file_path.split('.')[0] + '.' 
+ extension\n print 'package_dir', package_dir\n name = file_path.split('/')[-1]\n\n if extension in ['tar', 'tar.gz', 'tgz', 'tar.bz2']:\n # tar file\n mode = ''\n if extension in ['tar.gz', 'tgz']:\n mode = 'gz'\n elif extension in ['tar.bz2']:\n mode = 'bz2'\n with tarfile.open(name=package_dir, mode='w:%s' % mode) as tar:\n tar.add(file_path, arcname=name)\n elif extension in ['zip']:\n with zipfile.ZipFile(b, 'w') as zf:\n zf.write(file_path, arcname=name)\n else:\n pass\n\n return package_dir", "def pack_content(bsp_file: BSP, path: str, is_peti: bool):\n files = set() # Files to pack.\n soundscripts = set() # Soundscripts need to be added to the manifest too..\n rem_soundscripts = set() # Soundscripts to exclude, so we can override the sounds.\n particles = set()\n additional_files = set() # .vvd files etc which also are needed.\n preload_files = set() # Files we want to force preloading\n\n try:\n pack_list = open(path[:-4] + '.filelist.txt')\n except (IOError, FileNotFoundError):\n pass # Assume no files if missing..\n # There might still be things to inject.\n else:\n with pack_list:\n for line in pack_list:\n line = line.strip().lower()\n if not line or line.startswith('//'):\n continue # Skip blanks or comments\n\n if line[:8] == 'precache':\n preload_files.add(line)\n continue\n\n if line[:2] == '-#':\n rem_soundscripts.add(line[2:])\n continue\n\n if line[:1] == '#':\n line = line[1:]\n soundscripts.add(line)\n\n # We need to add particle systems to a manifest.\n if line.startswith('particles/'):\n particles.add(line)\n\n if line[-4:] == '.mdl':\n additional_files.update({\n line[:-4] + ext\n for ext in\n MDL_ADDITIONAL_EXT\n })\n\n files.add(line)\n\n # Remove guessed files not in the original list.\n additional_files -= files\n\n # Only generate a soundscript for PeTI maps..\n if is_peti:\n music_data = CONF.find_key('MusicScript', [])\n if music_data.value:\n generate_music_script(music_data, files)\n # Add the new script to the manifest file..\n soundscripts.add('scripts/BEE2_generated_music.txt')\n\n # We still generate these in hammer-mode - it's still useful there.\n # If no files are packed, no manifest will be added either.\n gen_sound_manifest(soundscripts, rem_soundscripts)\n gen_part_manifest(particles)\n gen_auto_script(preload_files, is_peti)\n\n inject_names = list(inject_files())\n\n # Abort packing if no packfiles exist, and no injected files exist either.\n if not files and not inject_names:\n LOGGER.info('No files to pack!')\n return\n\n LOGGER.info('Files to pack:')\n for file in sorted(files):\n # \\t seperates the original and in-pack name if used.\n LOGGER.info(' # \"' + file.replace('\\t', '\" as \"') + '\"')\n\n if additional_files and LOGGER.isEnabledFor(logging.DEBUG):\n LOGGER.info('Potential additional files:')\n for file in sorted(additional_files):\n LOGGER.debug(' # \"' + file + '\"')\n\n LOGGER.info('Injected files:')\n for _, file in inject_names:\n LOGGER.info(' # \"' + file + '\"')\n\n LOGGER.info(\"Packing Files!\")\n\n # Manipulate the zip entirely in memory\n zip_data = BytesIO()\n zip_data.write(bsp_file.get_lump(BSP_LUMPS.PAKFILE))\n zipfile = ZipFile(zip_data, mode='a')\n LOGGER.debug(' - Existing zip read')\n\n zip_write = get_zip_writer(zipfile)\n\n for file in files:\n pack_file(zip_write, file)\n\n for file in additional_files:\n pack_file(zip_write, file, suppress_error=True)\n\n for filename, arcname in inject_names:\n LOGGER.info('Injecting \"{}\" into packfile.', arcname)\n zip_write(filename, arcname)\n\n LOGGER.debug(' - 
Added files')\n\n zipfile.close() # Finalise the zip modification\n\n # Copy the zipfile into the BSP file, and adjust the headers\n bsp_file.replace_lump(\n path,\n BSP_LUMPS.PAKFILE,\n zip_data.getvalue(), # Get the binary data we need\n )\n LOGGER.debug(' - BSP written!')\n\n LOGGER.info(\"Packing complete!\")", "def processfile(args, fh):\n if args.quick:\n scanner = quickScanZip(args, fh)\n else:\n scanner = findPKHeaders(args, fh)\n\n def checkarg(arg, ent):\n if not arg:\n return False\n return '*' in arg or ent.name in arg\n def checkname(a, b):\n if a and '*' in a: return True\n if b and '*' in b: return True\n l = 0\n if a: l += len(a)\n if b: l += len(b)\n return l > 1\n\n if args.verbose and not (args.cat or args.raw or args.save):\n print(\" 0304 need flgs mth stamp --crc-- compsize fullsize nlen xlen namofs xofs datofs endofs\")\n print(\" 0102 crea need flgs mth stamp --crc-- compsize fullsize nlen xlen clen dsk0 attr osattr datptr namofs xofs cmtofs endofs\")\n for ent in scanner:\n if args.cat or args.raw or args.save:\n if args.quick and isinstance(ent, CentralDirEntry) or \\\n not args.quick and isinstance(ent, LocalFileHeader):\n ent.loaditems(fh)\n do_cat = checkarg(args.cat, ent)\n do_raw = checkarg(args.raw, ent)\n do_save= checkarg(args.save, ent)\n\n do_name= checkname(args.cat, args.raw)\n\n if do_name:\n print(\"\\n===> \" + ent.name + \" <===\\n\")\n\n sys.stdout.flush()\n blks = zipraw(fh, ent)\n\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n if do_cat or do_save:\n blks = skipbytes(blks, 12, args)\n\n if do_cat:\n sys.stdout.buffer.writelines(zipcat(blks, ent))\n if do_raw:\n sys.stdout.buffer.writelines(blks)\n if do_save:\n savefile(args.outputdir, ent.name, zipcat(blks, ent))\n else:\n ent.loaditems(fh)\n if args.verbose or not args.quick:\n print(\"%08x: %s\" % (ent.pkOffset, ent))\n else:\n print(ent.summary())\n if hasattr(ent, \"comment\") and ent.comment and not args.dumpraw:\n print(ent.comment)\n if args.dumpraw and hasattr(ent, \"extraLength\") and ent.extraLength:\n print(\"%08x: XTRA: %s\" % (ent.extraOffset, binascii.b2a_hex(getbytes(fh, ent.extraOffset, ent.extraLength))))\n if args.dumpraw and hasattr(ent, \"comment\") and ent.comment:\n print(\"%08x: CMT: %s\" % (ent.commentOffset, binascii.b2a_hex(getbytes(fh, ent.commentOffset, ent.commentLength))))\n if args.dumpraw and isinstance(ent, LocalFileHeader):\n blks = zipraw(fh, ent)\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n\n blockdump(ent.dataOffset, blks)", "def pack(filename: Union[str, Path], source_dir: Union[str, Path]) -> None:\n raise NotImplemented", "def do_pack():\n from os import mkdir, path\n\n filename = \"web_static_{}.tgz\".format(now.strftime(\"%Y%m%d%H%M%S\"))\n filepath = \"versions/{}\".format(filename)\n\n try:\n mkdir('./versions')\n except FileExistsError:\n pass\n\n print(\"Packing web_static to {}\".format(filepath))\n cmd = local('tar -cvzf {} web_static'.format(filepath))\n if (cmd.return_code == 0):\n filesize = path.getsize(filepath)\n print(\"web_static packed: {} -> {}Bytes\".format(filepath, filesize))\n return filepath\n return None", "def test_file_package_request(self):\n file_name = \"file_name\"\n chunk_index = 0\n\n expected_topic = self.factory.common_topic + WAPMF.FILE_BINARY_REQUEST\n expected_payload = json.dumps(\n {\n \"name\": file_name,\n \"chunkIndex\": chunk_index,\n }\n )\n expected_message = Message(expected_topic, expected_payload)\n serialized_message = 
self.factory.make_from_package_request(\n file_name, chunk_index\n )\n\n self.assertEqual(expected_message, serialized_message)", "def do_pack():\n local(\"sudo mkdir -p versions\")\n date_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n name_file = \"versions/web_static{}.tgz\".format(date_time)\n local(\"sudo tar -cvzf {} web_static\".format(name_file))\n return name_file", "def package(self, outfile, update=False, local=True, remote=True):\n log.debug(\"Packaging and streaming %s\" % self.name)\n with TarPackaging(outfile) as tar:\n self._build(tar, update, local, remote, True)\n log.debug(\"Packaged %s\" % self.name)", "def _pack(self):\n pass", "def pack(backend_name, patterns, size, minimum, yes):\n # Load the backend\n backend = get_backend(backend_name)\n # Find the paths\n click.echo(\"Scanning files... \", nl=False)\n paths, size_used = Scanner(config.root_path, patterns).unstored_paths(\n config.index, size * (1024 ** 3)\n )\n click.secho(\"Done\", fg=\"green\")\n if not paths:\n click.secho(\"No files found to add.\", fg=\"yellow\")\n return\n # Print what we found\n for path in paths:\n click.echo(\"> \" + click.style(path, fg=\"blue\"))\n click.echo(\"%s files, %s\" % (len(paths), human_size(size_used)))\n # Prompt to continue\n if not yes:\n if not click.confirm(\"Proceed with build?\"):\n return\n click.echo()\n # Select an unused archive ID\n archive_id = config.index.new_archive_id()\n # Pack the volume\n archive = Archive.from_files(archive_id, paths, config.root_path)\n click.echo(f\"Archive is {archive.id}, size {human_size(archive.size)}\")\n if archive.size < minimum * (1024 ** 3):\n click.echo(\"Archive too small, quitting\")\n sys.exit(1)\n backend.archive_store(config.root_path, archive)\n click.echo(\"Archive stored\")\n config.index.add_archive(archive, backend_name)\n click.echo(\"Archive indexed\")", "def do_pack():\n time_test = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_name = \"versions/web_static_\" + time_test + \".tgz\"\n command1 = \"mkdir -p versions\"\n command2 = \"tar -czvf \" + file_name + \" web_static\"\n local(command1)\n com = local(command2)\n if com.return_code == 0:\n return file_name\n else:\n return None", "def assemble(self, file):\n self.pass_1(file)\n self.pass_2(file, self.get_hack_file(file))", "def do_pack():\n a = datetime.now()\n file_name = \"versions/web_static_{}{}{}{}{}{}.tgz\\\n\".format(a.year if a.year > 999 else \"0\" + str(a.year),\n a.month if a.month > 9 else \"0\" + str(a.month),\n a.day if a.day > 9 else \"0\" + str(a.day),\n a.hour if a.hour > 9 else \"0\" + str(a.hour),\n a.minute if a.minute > 9 else \"0\" + str(a.minute),\n a.second if a.second > 9 else \"0\" + str(a.second))\n try:\n print(\"Packing web_static to \" + file_name)\n local(\"mkdir -p versions\")\n\n local(\"tar -cvzf \" + file_name + \" web_static\")\n return file_name\n except:\n return None", "def do_pack():\n files = 'versions/web_static_{}{}{}{}{}{}.tgz'\\\n .format(T.year, T.month, T.day, T.hour, T.minute, T.second)\n local('mkdir -p versions')\n execute = local(\"tar -cvzf \" + files + \" ./web_static/\")\n if execute.succeeded:\n return files\n return None", "def do_pack():\n\n sd = '{0:%Y%m%d%H%M%S}'.format(datetime.now())\n fname = 'versions/web_static_' + sd + '.tgz'\n local('mkdir -p versions')\n rs = local('tar -cvzf ' + fname + ' web_static')\n\n if rs.succeeded:\n return fname\n return None", "def do_pack():\n d = datetime.now()\n local(\"mkdir -p versions\")\n file_name = 
'versions/web_static_{}{}{}{}{}{}.tgz\\\n'.format(d.year, d.month, d.day, d.hour, d.minute, d.second)\n status = local(\"tar -cvzf\" + file_name + \" ./web_static/\", capture=True)\n if status.succeeded:\n return file_name\n return None", "def package(target, source, env):\n\n # Print out.\n print('')\n print(\"#######################\")\n print(\"# Packaging the files #\")\n print(\"#######################\")\n\n # List of distribution files.\n type_list = [env['DIST_TYPE']]\n if type_list[0] == 'ALL':\n type_list = ['zip', 'tar']\n\n # Loop over the distribution files.\n for dist_type in type_list:\n # The file name.\n if dist_type == 'zip':\n file = env['DIST_FILE'] + '.zip'\n elif dist_type == 'tar':\n file = env['DIST_FILE'] + '.tar.bz2'\n elif dist_type == 'dmg':\n file = env['DIST_FILE'] + '.dmg'\n\n # Print out.\n print(\"\\n\\nCreating the package distribution \" + repr(file) + \".\\n\")\n\n # Create the special Mac OS X DMG file and then stop execution.\n if dist_type == 'dmg':\n # Create the Mac OS X universal application.\n print(\"\\n# Creating the Mac OS X universal application.\\n\\n\")\n cmd = '%s setup.py py2app' % sys.executable\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Create the dmg image.\n print(\"\\n\\n# Creating the DMG image.\\n\\n\")\n cmd = 'hdiutil create -ov -fs HFS+ -volname \"relax\" -srcfolder dist/relax.app ../%s' % file\n print(\"%s\\n\" % cmd)\n pipe = Popen(cmd, shell=True, stdin=PIPE, close_fds=False)\n waitpid(pipe.pid, 0)\n\n # Stop executing.\n return\n\n # Open the Zip distribution file.\n if dist_type == 'zip':\n archive = ZipFile(path.pardir + path.sep + file, 'w', compression=8)\n\n # Open the Tar distribution file.\n elif dist_type == 'tar':\n if search('.bz2$', file):\n archive = TarFile.bz2open(path.pardir + path.sep + file, 'w')\n elif search('.gz$', file):\n archive = TarFile.gzopen(path.pardir + path.sep + file, 'w')\n else:\n archive = TarFile.open(path.pardir + path.sep + file, 'w')\n\n # Base directory.\n base = getcwd() + sep\n\n # Walk through the directories.\n for root, dirs, files in walk(getcwd()):\n # Skip the subversion directories.\n if search(\"\\.svn\", root):\n continue\n\n # Add the files in the current directory to the archive.\n for i in range(len(files)):\n # Skip any '.sconsign' files, hidden files, byte-compiled '*.pyc' files, or binary objects '.o', '.os', 'obj', 'lib', and 'exp'.\n if search(\"\\.sconsign\", files[i]) or search(\"^\\.\", files[i]) or search(\"\\.pyc$\", files[i]) or search(\"\\.o$\", files[i]) or search(\"\\.os$\", files[i]) or search(\"\\.obj$\", files[i]) or search(\"\\.lib$\", files[i]) or search(\"\\.exp$\", files[i]):\n continue\n\n # Create the file name (without the base directory).\n name = path.join(root, files[i])\n name = name[len(base):]\n print('relax-' + version + path.sep + name)\n\n # The archive file name.\n arcname = 'relax-' + version + path.sep + name\n\n # Zip archives.\n if dist_type == 'zip':\n archive.write(filename=name, arcname=arcname)\n\n # Tar archives.\n if dist_type == 'tar':\n archive.add(name=name, arcname=arcname)\n\n # Close the archive.\n archive.close()\n\n # Final printout.\n print(\"\\n\\n\\n\")", "def unpack(file, path='.'):\n assert isfile(file)\n assert isdir(path)\n\n for implementor in [GzipTarredFile, ZippedFile, Bzip2TarredFile]:\n if implementor.is_valid(file):\n with console.cd(path):\n return [implementor(file).extract(), implementor]\n else:\n raise InvalidFile, 'compressed file 
format unknown: %s' % file", "def do_pack():\n\n datenow = datetime.now()\n full_date = datenow.strftime(\"%Y%m%d%H%M%S\")\n\n try:\n if not os.path.isdir(\"versions\"):\n local(\"mkdir versions\")\n local_command = local(\"tar -cvzf versions/web_static_{}.tgz web_static\"\n .format(full_date))\n return local_command\n except Exception:\n return None", "def debianize( strFilename ):\n \n #~ data = gzip.GzipFile( strFilename ).read();\n #~ print data;\n #~ return;\n \n #~ data = gzip.open( strFilename ).read();\n #~ print data;\n #~ return; \n \n #~ uncompressedData = bz2.BZ2File(strFilename).read()\n #~ print str(uncompressedData)\n #~ return;\n \n #~ file = open( strFilename, 'rb' );\n #~ data = file.read();\n #~ file.close();\n #~ print debug.dumpHexa( data );\n \n #~ ar = tarfile.open(strFilename, 'r:*')\n #~ for item in ar:\n #~ print( str(item) );\n #~ print( \"%s:\" % item.name );\n #~ #print debug.dumpHexa(item.buf);\n #~ #print zlib.decompress(item.buf)\n #~ #print zlib.decompress(ar.extractfile(item).read())\n #~ data = ar.extractfile(item.name).read()\n #~ print data # works !\n #~ ar.close() \n #~ return;\n \n fileLists = [];\n file = open( strFilename );\n data = file.read();\n file.close();\n \n print( \"data len: %d\" % len( data ) );\n\n nDataCompressedOffset = 0; # 132\n\n # works fine on toto.gz\n #~ f = gzip.open(strFilename, 'rb')\n #~ file_content = f.read()\n #~ print file_content\n #~ f.close() \n \n #~ decompressor = bz2.BZ2Decompressor();\n #~ uncompressed = decompressor.decompress(data[nDataCompressedOffset:]);\n \n #~ uncompressed = zlib.decompress(data[nDataCompressedOffset:]);\n \n uncompressed = decompress( data );\n print( \"uncompressed: %s\" % str( uncompressed ) );", "def compress(self, src, dst):\n info = readelf_get_info(src)\n starting_size = os.path.getsize(src)\n if starting_size != info[\"size\"]:\n raise RuntimeError(\"size of file '%s' differs from header claim: %i != %i\" %\n (src, starting_size, info[\"size\"]))\n rfd = open(src, \"rb\")\n wfd = open(dst, \"wb\")\n data = rfd.read(starting_size)\n wfd.write(data[info[\"entry\"]:])\n rfd.close()\n wfd.close()\n self.__uncompressed_size = len(data) - info[\"entry\"]\n if is_verbose():\n print(\"Wrote compressable program block '%s': %i bytes\" % (dst, self.__uncompressed_size))\n self.__contexts = []\n self.__weights = []\n (so, se) = run_command([self.__command, dst])\n lines = so.split(\"\\n\")\n for ii in lines:\n terms = ii.split()\n if terms and terms[0].startswith(\"Final\"):\n compressed_size = int(terms[1])\n for jj in terms[2:]:\n individual_term = jj.split(\"*\")\n self.__weights += [int(individual_term[0], 10)]\n self.__contexts += [int(individual_term[1], 16)]\n if is_verbose():\n print(\"Program block compressed into '%s': %i bytes\" % (dst + \".pack\", compressed_size))\n print(\"Compression weights: %s\" % (str(self.__weights)))\n print(\"Compression contexts: %s\" % (str(self.__contexts)))\n rfd = open(dst + \".pack\", \"rb\")\n compressed_contexts = []\n compressed_weights = []\n uncompressed_size = rfd.read(4)\n uncompressed_size = (struct.unpack(\"I\", uncompressed_size))[0]\n if uncompressed_size != self.__uncompressed_size:\n raise RuntimeError(\"size given to packer does not match size information in file: %i != %i\" %\n (self.__uncompressed_size, uncompressed_size))\n context_count = rfd.read(1)\n context_count = (struct.unpack(\"B\", context_count))[0]\n for ii in range(context_count):\n compressed_weights += struct.unpack(\"B\", rfd.read(1))\n for ii in 
range(context_count):\n compressed_contexts += struct.unpack(\"B\", rfd.read(1))\n if compressed_contexts != self.__contexts:\n raise RuntimeError(\"contexts reported by packer do not match context information in file: %s != %s\" %\n (str(self.__contexts), str(compressed_contexts)))\n if compressed_weights != self.__weights:\n raise RuntimeError(\"weights reported by packer do not match weight information in file: %s != %s\" %\n (str(self.__weights), str(compressed_weights)))\n read_data = rfd.read()\n rfd.close()\n if len(read_data) != compressed_size:\n raise RuntimeError(\"size reported by packer does not match length of file: %i != %i\" %\n (compressed_size, len(read_data)))\n self.__data = []\n for ii in read_data:\n self.__data += struct.unpack(\"B\", ii)", "def do_pack():\n time = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n file_name = \"versions/web_static_{}.tgz\".format(time)\n try:\n local(\"mkdir -p ./versions\")\n local(\"tar --create --verbose -z --file={} ./web_static\"\n .format(file_name))\n return file_name\n except:\n return None", "def receiveAndUnpack(binary, filename):\n inputfile = open(filename, 'wb')\n inputfile.write(binary)", "def pack_file(zip_write, filename: str, suppress_error=False):\n if '\\t' in filename:\n # We want to rename the file!\n filename, arcname = filename.split('\\t')\n else:\n arcname = filename\n\n if filename[-1] == '*':\n # Pack a whole folder (blah/blah/*)\n directory = filename[:-1]\n file_count = 0\n for poss_path in RES_ROOT:\n dir_path = os.path.normpath(\n os.path.join(poss_path, directory)\n )\n if not os.path.isdir(dir_path):\n continue\n for subfile in os.listdir(dir_path):\n full_path = os.path.join(dir_path, subfile)\n rel_path = os.path.join(directory, subfile)\n zip_write(\n filename=full_path,\n arcname=rel_path,\n )\n file_count += 1\n LOGGER.info('Packed {} files from folder \"{}\"', file_count, directory)\n return\n\n for poss_path in RES_ROOT:\n full_path = os.path.normpath(\n os.path.join(poss_path, filename)\n )\n if os.path.isfile(full_path):\n zip_write(\n filename=full_path,\n arcname=arcname,\n )\n break\n else:\n if not suppress_error:\n LOGGER.warning(\n '\"bee2/' + filename + '\" not found! 
(May be OK if not custom)'\n )", "def do_pack():\n try:\n date = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n if isdir(\"versions\") is False:\n local(\"mkdir versions\")\n file_name = \"versions/web_static_{}.tgz\".format(date)\n local(\"tar -cvzf {} web_static\".format(file_name))\n return file_name\n except BaseException:\n return None", "def do_pack():\n with api.settings(warn_only=True):\n isdir = os.path.isdir(\"versions\")\n if not isdir:\n mkdir = api.local(\"mkdir versions\")\n if mkdir.failed:\n return None\n sfx = datetime.now().strftime(\"%Y%m%d%M%S\")\n path = \"versions/web_static_{:s}.tgz\".format(sfx)\n tar = api.local(\"tar -cvzf {:s} web_static\".format(path))\n if tar.failed:\n return None\n size = os.stat(path).st_size\n print(\"wb_static packed: {} -> {}Bytes\".format(path, size))\n return path", "def do_pack():\n time = datetime.now()\n file = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n time.year,\n time.month,\n time.day,\n time.hour,\n time.minute,\n time.second\n )\n local('mkdir -p versions')\n if local('tar -cvzf ' + file + ' web_static').succeeded:\n return file\n return None", "def do_pack():\n a = datetime.now()\n file_name = \"versions/web_static_{}{}{}{}{}{}.tgz\".format(a.year,\n a.month,\n a.day,\n a.hour,\n a.minute,\n a.second)\n try:\n print(\"Packing web_static to \" + file_name)\n local(\"mkdir -p versions\")\n\n local(\"tar -cvzf \" + file_name + \" web_static\")\n return file_name\n except:\n return None", "def do_pack():\n try:\n if isdir('versions') is False:\n local(\"mkdir versions\")\n tgz_file = \"versions/web_static_{}.tgz\".format(\n time.strftime(\"%Y%m%d%H%M%S\"))\n local(\"tar -cvzf {} web_static\".format(tgz_file))\n return tgz_file\n except:\n return None", "def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? 
\".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))", "def process_archive(self, file):\n self.recursive_archive_depth += 1\n # LOG: write_log or somehow log the archive file here\n if self.recursive_archive_depth >= self.max_recursive_depth:\n file.make_dangerous('Archive bomb')\n else:\n tempdir_path = file.make_tempdir()\n # TODO: double check we are properly escaping file.src_path\n # otherwise we are running unvalidated user input directly in the shell\n command_str = '{} -p1 x \"{}\" -o\"{}\" -bd -aoa'\n unpack_command = command_str.format(SEVENZ_PATH,\n file.src_path, tempdir_path)\n self._run_process(unpack_command)\n self.process_dir(tempdir_path, file.dst_path)\n self.safe_rmtree(tempdir_path)\n self.recursive_archive_depth -= 1", "def compress_feats(self):\n mkpath(os.path.join(self.base_update_path,'comp_features'))\n mkpath(os.path.join(self.base_update_path,'comp_idx'))\n args = [self.base_update_path+'/', str(self.features_dim), '1', self.master_update_file, str(self.bits_num)]\n subprocess_command = [self.hashing_execpath+\"compress_feats\"] + args\n # this will work only if features to be compressed are present in self.base_update_path/features\n proc = subprocess.Popen(subprocess_command, stdout=subprocess.PIPE)\n print \"[HasherSwig.compress_feats: log] running command: {}\".format(subprocess_command)\n (out, err) = proc.communicate()\n print \"[HasherSwig.compress_feats: log] program output:\", out\n print \"[HasherSwig.compress_feats: log] program error:\", err", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def do_pack():\n with api.settings(warn_only=True):\n isdir = os.path.isdir('versions')\n if not isdir:\n mkdir = api.local('mkdir versions')\n if mkdir.failed:\n return False\n suffix = datetime.now().strftime('%Y%m%d%M%S')\n path = 'versions/web_static_{}.tgz'.format(suffix)\n tar = api.local('tar -cvzf {} web_static'.format(path))\n if tar.failed:\n return False\n size = os.stat(path).st_size\n print('web_static packed: {} -> {}Bytes'.format(path, size))\n return path", "def create_binpackage(user, repo, filename, distro, config):\n pkgtype = get_pkgtype(filename)\n conttype = \"application/x-{}\".format(pkgtype)\n distid = str(get_distid(pkgtype, distro, config))\n\n url = \"{}/repos/{}/{}/packages.json\".format(config['url_base'],\n user, repo)\n\n menc = MultipartEncoder(fields={'package[distro_version_id]': distid,\n 'package[package_file]':\n (filename, open(filename, 'rb'),\n conttype)})\n\n if config['debug']:\n print(\"DEBUG: Request ({}) {}\".format('POST', url))\n print(\"DEBUG: {}\".format(menc))\n\n try:\n resp = post(url, data=menc,\n headers={'Content-Type': menc.content_type})\n resp.raise_for_status()\n result = resp.json()\n except (HTTPError, ConnectionError, Timeout, IOError) as ex:\n abort(ex.message)\n\n return result", "def do_pack():\n now = datetime.now()\n file_name 
= \"web_static_{}{}{}{}{}{}.tgz\".format(\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second\n )\n try:\n local(\"sudo tar -cvzf {} ./web_static\".format(file_name))\n local(\"sudo mkdir -p versions\")\n local(\"sudo mv ./{} versions/\".format(file_name))\n except:\n return (None)\n return (\"versions/{}\".format(file_name))", "def do_pack():\n try:\n if os.path.isdir(\"versions\") is False:\n os.mkdir(\"versions\")\n time = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n packed = 'versions/web_static_' + time + '.tgz'\n fabric.api.local(\"tar -cvzf {} web_static\".format(packed))\n return packed\n except:\n return None", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def do_pack():\n\n local(\"mkdir -p versions\")\n current = dt.now()\n current = current.now()\n tgz = \"web_static_{}.tgz\".format(current.strftime(\"%Y%m%d%H%M%S\"))\n working = local(\"tar -cavf versions/{} web_static\".format(tgz))\n\n if working.failed:\n return None\n else:\n return \"versions/{}\".format(tgz)", "def do_pack():\n date = datetime.datetime.now()\n archive = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second)\n local('mkdir -p versions')\n check = local('tar -cvzf {} web_static'.format(archive))\n if check.failed:\n return None\n else:\n return archive", "def buildBundle(self, name=\"xepbundle\"):\n fltr = os.path.join(os.path.abspath(self.outpath), '*.pdf')\n files = sorted(glob.glob(fltr))\n tar = tarfile.open(\n os.path.join(self.outpath, \"{}.tar.bz2\".format(name)),\n \"w:bz2\")\n for name in files:\n tar.add(\n name, arcname=\"xepbundle/{}\".format(os.path.basename(name)))\n tar.close()", "def test_make_fna(self):\r\n fna_fp = os.path.join(self.sff_dir, 'test.fna')\r\n fna_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.fna')\r\n make_fna(self.sff_fp, fna_fp)\r\n make_fna(self.sff_gz_fp, fna_gz_fp)\r\n self.assertEqual(open(fna_fp).read(), fna_txt)\r\n self.assertEqual(open(fna_gz_fp).read(), fna_txt)", "def do_pack():\n time_f = '%Y%m%d%H%M%S'\n try:\n if not os.path.exists('versions'):\n local('mkdir versions')\n to = 'versions/web_static_{}.tgz'.format(\n datetime.now().strftime(time_f))\n\n local('tar -cvzf {} web_static'.format(to))\n return(to)\n except:\n return (None)", "def vcf_compress(fn):\n ret = cmd_exe(f\"vcf-sort {fn} | bgzip > {fn}.gz && tabix {fn}.gz\")", "def do_pack():\n date = (datetime.strftime(datetime.now(), \"%Y%m%d%H%M%S\"))\n name = \"versions/web_static_{}.tgz\".format(date)\n\n if not os.path.exists(\"./versions/\"):\n os.makedirs(\"./versions/\")\n try:\n local(\"tar -cvzf {} web_static\".format(name))\n return (name)\n except:\n return (None)", "def testTarBundling(self):\n try:\n tP = os.path.join(self.__workPath, \"t0.tar.gz\")\n dirPath = os.path.join(self.__inpDirPath, \"topdir\")\n\n ok = self.__fileU.bundleTarfile(tP, [dirPath], mode=\"w:gz\", recursive=True)\n self.assertTrue(ok)\n\n numBytes = self.__fileU.size(tP)\n self.assertGreaterEqual(numBytes, 250)\n #\n md5 = self.__fileU.hash(tP, hashType=\"md5\")\n self.assertTrue(md5 is not None)\n #\n ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)\n self.assertTrue(ok)\n #\n tP = os.path.join(self.__workPath, \"t1.tar.gz\")\n dirPathList = [os.path.join(self.__inpDirPath, \"topdir\", \"subdirA\"), os.path.join(self.__inpDirPath, \"topdir\", \"subdirB\")]\n\n ok = self.__fileU.bundleTarfile(tP, dirPathList, mode=\"w:gz\", recursive=True)\n self.assertTrue(ok)\n #\n ok = 
self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)\n self.assertTrue(ok)\n\n tP = os.path.join(self.__workPath, \"t2.tar\")\n dirPathList = [os.path.join(self.__inpDirPath, \"topdir\", \"subdirA\"), os.path.join(self.__inpDirPath, \"topdir\", \"subdirB\")]\n\n ok = self.__fileU.bundleTarfile(tP, dirPathList, mode=\"w\", recursive=True)\n self.assertTrue(ok)\n #\n ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def handle_package(self, prime_dir, bases_config: BasesConfiguration):\n emit.progress(\"Creating the package itself\")\n zipname = format_charm_file_name(self.config.name, bases_config)\n zipfh = zipfile.ZipFile(zipname, \"w\", zipfile.ZIP_DEFLATED)\n for dirpath, _dirnames, filenames in os.walk(prime_dir, followlinks=True):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n filepath = dirpath / filename\n zipfh.write(str(filepath), str(filepath.relative_to(prime_dir)))\n\n zipfh.close()\n return zipname", "def dumpf(self, gzip=False):\n if 0 != len(self.sources):\n os.mkdir(self.name)\n filename = os.path.join(self.name, 'bootstrap.sh')\n f = codecs.open(filename, 'w', encoding='utf-8')\n elif gzip:\n filename = '{0}.sh.gz'.format(self.name)\n f = gziplib.open(filename, 'w')\n else:\n filename = '{0}.sh'.format(self.name)\n f = codecs.open(filename, 'w', encoding='utf-8')\n f.write(self.comment)\n f.write('cd \"$(dirname \"$0\")\"\\n')\n for filename2, content in sorted(self.sources.iteritems()):\n f2 = open(os.path.join(self.name, filename2), 'w')\n f2.write(content)\n f2.close()\n for out in self.out:\n f.write(out)\n f.close()\n if gzip and 0 != len(self.sources):\n filename = 'sh-{0}.tar.gz'.format(self.name)\n tarball = tarfile.open(filename, 'w:gz')\n tarball.add(self.name)\n tarball.close()\n return filename\n return filename", "def execute(self):\n cwd = self.fm.thisdir\n marked_files = cwd.get_selection()\n\n if not marked_files:\n return\n\n def refresh(_):\n cwd = self.fm.get_directory(original_path)\n cwd.load_content()\n\n original_path = cwd.path\n parts = self.line.split()\n au_flags = parts[1:]\n\n descr = \"compressing files in: \" + os.path.basename(parts[1])\n obj = CommandLoader(args=['apack'] + au_flags + \\\n [os.path.relpath(f.path, cwd.path) for f in marked_files], descr=descr)\n\n obj.signal_bind('after', refresh)\n self.fm.loader.add(obj)", "def do_install(self, args):\n if args:\n try:\n plugin_name, file_path = args.split()[0], args.split()[1]\n except Exception as e:\n return print(display_messages(\"the argument is invalid please type ?install for more information\", error=True))\n if not path.isfile(file_path):\n return print(\n display_messages(\n \"the file {} not found \".format(file_path), error=True\n )\n )\n head, tail = os.path.split(file_path)\n dest = copyfile(file_path, \"{}/{}\".format(self.temp_path, tail))\n print(display_messages(\"copy content file .zip to {}\".format(dest), info=True))\n \n path_to_zip_file = tempfile.gettempdir() + \"/{}\".format(tail)\n with ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(tempfile.gettempdir())\n temp_path_file_extracted = \"{}/{}.py\".format(self.temp_path, plugin_name)\n print(\n display_messages(\n \"extracted files on : {}\".format(temp_path_file_extracted), info=True\n )\n )\n if not path.isfile(temp_path_file_extracted):\n return print(\n display_messages(\n \"the file {} not found \".format(temp_path_file_extracted), 
error=True\n )\n )\n temp_templates_path = \"{}/{}\".format(self.temp_path, plugin_name)\n if not path.isdir(temp_templates_path):\n return print(\n display_messages(\n \"the directory template {} not found \".format(temp_templates_path), error=True\n )\n )\n source = temp_path_file_extracted\n destination = \"{}/{}.py\".format(self.captiveflask_setup_path, plugin_name)\n dest = copyfile(source, destination)\n print(display_messages(\"copy content file to {}\".format(dest), info=True))\n\n copy_tree(\n temp_templates_path, C.user_config_dir + \"/config/templates/{}\".format(plugin_name)\n )\n print(\n display_messages(\n \"plugin {} install {}\".format( plugin_name,setcolor(\"sucessful\", color=\"green\")),\n info=True,\n )\n )\n return \n print(\n display_messages(\"unknown command: {} \".format(args), error=True)\n )", "def do_pack():\n\n now = datetime.now()\n time_now = now.strftime(\"%Y%m%d%H%M%S\")\n archive_name = \"versions/web_static_\" + time_now + \".tgz\"\n local('mkdir -p versions')\n archive_command = local(\"tar -zcvf \" + archive_name + \" web_static\")\n\n if archive_command.succeeded:\n return archive_name\n\n return None", "def import_file(filename):\n if not os.path.exists(filename): return 0\n if zipfile.is_zipfile(filename):\n infp = zipfile.ZipFile(filename)\n elif tarfile.is_tarfile(filename):\n infp = tarfile.TarFile(filename)\n else: # regular file\n infp = RegFile(filename)\n name_list =infp.namelist()\n director = {}\n VALUES = {} \n if \"USERNAME\" in os.environ:\n VALUES[\"USER\"] = os.environ[\"USERNAME\"] # NameId\n if \"HOMEPATH\" in os.environ:\n VALUES[\"HOME\"] = 'C:' + os.sep + os.environ[\"HOMEPATH\"]\n if \"HOME\" in os.environ:\n VALUES[\"HOME\"] = os.environ[\"HOME\"]\n if \"USERPROFILE\" in os.environ:\n VALUES[\"HOME\"] = os.environ[\"USERPROFILE\"]\n globalspath, f = myro.globvars.__file__.rsplit(os.sep, 1)\n #print \"globalspath:\", globalspath\n myropath, f = globalspath.rsplit(os.sep, 1)\n #print \"myropath:\", myropath\n sitepath, f = myropath.rsplit(os.sep, 1)\n #print \"sitepath:\", sitepath\n myroparts = myropath.split(os.sep)\n pythonpath = myroparts[0] + os.sep + myroparts[1]\n VALUES[\"DESKTOP\"] = VALUES[\"HOME\"] + os.sep + \"DESKTOP\" \n VALUES[\"PYTHONDIR\"] = pythonpath\n VALUES[\"MYRODIR\"] = myropath\n VALUES[\"PYTHONSITEDIR\"] = sitepath\n VALUES[\"PYTHONDIR\"] = pythonpath\n install_count = 0\n if \"MANIFEST\" in name_list:\n manifest = infp.read(\"MANIFEST\")\n lines = manifest.split(\"\\n\")\n for line in lines:\n if \":\" in line:\n f, dest = map(string.strip, line.strip().split(\":\"))\n director[f] = dest % VALUES\n for name in name_list:\n if name == \"MANIFEST\": continue\n contents = infp.read(name)\n print \" writing:\", director[name], \"...\"\n # first write to temp file:\n try:\n outfp = open(director[name], \"wb\")\n except:\n makePath(director[name])\n outfp = open(director[name], \"wb\")\n outfp.write(contents)\n outfp.close()\n install_count += 1\n else:\n print \" ERROR: no MANIFEST in Myro upgrade; skipping\"\n infp.close()\n return install_count", "def do_pack():\n\n local('mkdir -p versions')\n\n time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_time = 'versions/web_static_{}.tgz'.format(time)\n\n compressed = local(\"tar -cvzf \" + file_time + \" web_static/\")\n\n if compressed.succeeded:\n return file_time\n return None", "def run(env: Environment):\n\n package_directory = env.args.get('package_directory') or '.'\n output_directory = env.args.get('output_directory')\n\n directory = 
os.path.realpath(package_directory)\n if not os.path.exists(directory):\n raise NotADirectoryError('No such directory \"{}\"'.format(directory))\n\n save_directory = (\n os.path.realpath(output_directory)\n if output_directory else\n directory\n )\n\n bundle_directory = tempfile.mkdtemp(prefix='pipper-bundle-')\n\n try:\n print('[COMPILE]: Creating universal wheel')\n distribution_data = create_wheel(directory, bundle_directory)\n print('[COLLECT]: Creating package metadata')\n create_meta(directory, bundle_directory, distribution_data)\n print('[ASSEMBLE]: Creating pipper package bundle')\n path = zip_bundle(bundle_directory, save_directory, distribution_data)\n print('[BUNDLED]:', path)\n except Exception:\n raise\n finally:\n shutil.rmtree(bundle_directory)", "def run(self):\n self.compress(\n self.__config.public_key(),\n self.__config.input_dir(),\n self.__config.output_dir(),\n self.__config.suffix()\n )", "def do_pack():\n local(\"mkdir -p versions\")\n time = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file = local(\"tar -czvf versions/web_static_%s.tgz web_static\" % time)\n if file:\n return \"versions/web_static_{}.tgz\".format(time)\n else:\n return None", "def genPSFimage(filename=None):\n hdu=pf.open(filename)\n nn = len(hdu)\n for i in range(1,nn):\n img = hdu[i].data[0][4:].reshape(npix,npix)\n img = img/img.sum()\n hdu[i].data = img\n #hdu.scale('int16', '', bzero=32768)\n newfilename = filename[:-7]+'_stamp.fits'\n hdu.writeto(newfilename)\n os.system('gzip '+newfilename)", "def emit_pack_instruction(self, *, loop_indices=None):", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def do_pack():\n makedirs('versions', exist_ok=True)\n date = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n time.year, time.month, time.day, time.minute, time.second)\n check = local(\"tar -cvzf \" + date + \" ./web_static/\")\n if check.succeeded:\n return date\n return None", "def create_srcpackage(user, repo, filename, distro, config):\n conttype = \"application/x-dsc\"\n distid = str(get_distid('dsc', distro, config))\n fpath = dirname(filename)\n\n url = \"{}/repos/{}/{}/packages/contents.json\".format(config['url_base'],\n user, repo)\n\n menc = MultipartEncoder(fields={'package[distro_version_id]': distid,\n 'package[package_file]':\n (filename, open(filename, 'rb'),\n conttype)})\n\n if config['debug']:\n print(\"DEBUG: Request ({}) {}\".format('POST', url))\n print(\"DEBUG: {}\".format(menc))\n\n try:\n resp = post(url, data=menc,\n headers={'Content-Type': menc.content_type})\n resp.raise_for_status()\n result = resp.json()\n except (HTTPError, ConnectionError, Timeout, IOError) as ex:\n abort(ex.message)\n\n filelist = [('package[distro_version_id]', distid),\n ('package[package_file]',\n (filename, open(filename, 'rb'), conttype))]\n\n for srcfile in result['files']:\n srcfilename = '{}/{}'.format(fpath, srcfile['filename'])\n filelist.append(('package[source_files][]',\n (srcfilename,\n open(srcfilename, 'rb'),\n 'application/x-gzip')))\n\n url = \"{}/repos/{}/{}/packages.json\".format(config['url_base'],\n user, repo)\n\n menc = MultipartEncoder(fields=filelist)\n\n if config['debug']:\n print(\"DEBUG: Request ({}) {}\".format('POST', url))\n print(\"DEBUG: {}\".format(menc))\n\n try:\n resp = post(url, data=menc,\n headers={'Content-Type': menc.content_type})\n 
resp.raise_for_status()\n result = resp.json()\n except (HTTPError, ConnectionError, Timeout, IOError) as ex:\n abort(ex.message)\n\n return result", "def packaging(src):\n\twork_copy = osp.dirname(src)\n\t\n\taddon_info = \"\".join(open(work_copy + osp.sep + \"install.rdf\"))\n\taddon_name = re.search(\"(?<=em\\:name\\=\\\").*(?=\\\")\",addon_info).group(0)\n\taddon_version = re.search(\"(?<=em\\:version\\=\\\").*(?=\\\")\",addon_info).group(0)\n\n\ttemp_copy_base = tempfile.mkdtemp()\n\ttemp_copy = osp.join(temp_copy_base,addon_name)\n\t\n\txpi_name = \"%s-%s.xpi\" % (addon_name,addon_version)\n\txpi_fullpath = osp.join(work_copy,xpi_name);\n\t\n\tprint \"\"\"\n\tAdd-on : %s\n\tVersion : %s\n\tWork Copy : %s\n\tTemp Copy : %s\n\tXPI File : %s\n\t\"\"\" % (addon_name,addon_version,work_copy,temp_copy, xpi_name)\n\n\tprint \"copying work to temp dir...\"\n\tcopytree(work_copy,temp_copy,ignore=ignore_patterns('scriptdemo','*.xpi','.*','*.bat','*.py','*LOG','*~','*.swp'))\n\n\tprint \"packaging xpi...\"\n\tcompress(temp_copy,xpi_fullpath);\n\n\tprint \"cleaning...\"\n\trmtree(temp_copy_base)", "def generate_overlayfs_stacking(self, working_file_name):\n\n # Reopenthe working file\n working_file = open(working_file_name, \"a\")\n\n\n working_file.write(\"generate_overlayfs_stacking\\n\")\n\n # We are done here, now close the file\n working_file.close()", "def test_1_recv_pkgsend(self):\n\n f = fmri.PkgFmri(self.published[3], None)\n\n # First, retrieve the package.\n self.pkgrecv(self.durl1, \"--raw -d {0} {1}\".format(self.tempdir, f))\n\n # Next, load the manifest.\n basedir = os.path.join(self.tempdir, f.get_dir_path())\n mpath = os.path.join(basedir, \"manifest\")\n\n m = manifest.Manifest()\n raw = open(mpath, \"rb\").read()\n m.set_content(raw)\n\n # Verify that the files aren't compressed since -k wasn't used.\n # This is also the format pkgsend will expect for correct\n # republishing.\n ofile = file(os.devnull, \"rb\")\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n ifile = file(os.path.join(basedir, a.hash),\n \"rb\")\n\n # Since the file shouldn't be compressed, this\n # should return a zlib.error.\n self.assertRaises(zlib.error,\n misc.gunzip_from_stream, ifile, ofile,\n ignore_hash=True)\n\n # Next, send it to another depot\n self.pkgsend(self.durl2, \"open foo@1.0-1\")\n self.pkgsend(self.durl2,\n \"include -d {0} {1}\".format(basedir, mpath))\n self.pkgsend(self.durl2, \"close\")", "def test_all_packs_creation(repo):\n pack_1 = repo.setup_one_pack('Pack1')\n pack_1.pack_metadata.write_json(\n {\n 'name': 'Pack Number 1',\n }\n )\n\n pack_2 = repo.setup_one_pack('Pack2')\n pack_2.pack_metadata.write_json(\n {\n 'name': 'Pack Number 2',\n }\n )\n\n with ChangeCWD(repo.path):\n with temp_dir() as temp:\n runner = CliRunner(mix_stderr=False)\n result = runner.invoke(main, [ARTIFACTS_CMD, '-a', temp, '-p', 'all'])\n\n assert result.exit_code == 0\n assert os.path.exists(os.path.join(str(temp), 'uploadable_packs', 'Pack1.zip'))\n assert os.path.exists(os.path.join(str(temp), 'uploadable_packs', 'Pack2.zip'))", "async def transform(self, file):\n\t\tpass", "def F(f):\n return datafile(f, __name__)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"pldmfwuppkgname\", help=\"Name of the PLDM FW update package\"\n )\n parser.add_argument(\"metadatafile\", help=\"Path of metadata JSON file\")\n parser.add_argument(\n \"images\",\n nargs=\"+\",\n help=(\n \"One or more firmware image 
paths, in the same order as \"\n \" ComponentImageInformationArea entries\"\n ),\n )\n\n args = parser.parse_args()\n image_files = args.images\n with open(args.metadatafile) as file:\n try:\n metadata = json.load(file)\n except ValueError:\n sys.exit(\"ERROR: Invalid metadata JSON file\")\n\n # Validate the number of component images\n if len(image_files) != len(metadata[\"ComponentImageInformationArea\"]):\n sys.exit(\n \"ERROR: number of images passed != number of entries \"\n \" in ComponentImageInformationArea\"\n )\n\n try:\n with open(args.pldmfwuppkgname, \"w+b\") as pldm_fw_up_pkg:\n component_bitmap_bit_length = write_pkg_header_info(\n pldm_fw_up_pkg, metadata\n )\n write_fw_device_identification_area(\n pldm_fw_up_pkg, metadata, component_bitmap_bit_length\n )\n write_component_image_info_area(\n pldm_fw_up_pkg, metadata, image_files\n )\n update_pkg_header_size(pldm_fw_up_pkg)\n write_pkg_header_checksum(pldm_fw_up_pkg)\n append_component_images(pldm_fw_up_pkg, image_files)\n pldm_fw_up_pkg.close()\n except BaseException:\n pldm_fw_up_pkg.close()\n os.remove(args.pldmfwuppkgname)\n raise", "def pack(output_filename, sources):\n dirname = os.path.dirname(output_filename)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n ext = os.path.splitext(output_filename)[1][1:]\n if ext == 'zip':\n pack_zip(output_filename, sources)\n elif ext in ('gz', 'tgz', 'bz2', 'tar'):\n pack_tar(output_filename, sources, ext)\n else:\n raise AttributeError('Output_filename must be an archive (ex: .tar.gz, .zip)')", "def main(gtfs_file, input_json_file):\n\n with open(input_json_file) as jsonfile:\n input_json = json.load(jsonfile)\n\n gtfs_feed = mzgtfs.feed.Feed(filename=gtfs_file)\n\n for fare_id, rules_attributes in input_json.iteritems():\n add_fare_id(gtfs_feed, fare_id, rules_attributes)\n\n files = ['fare_attributes.txt', 'fare_rules.txt']\n gtfs_feed.write('fare_attributes.txt', gtfs_feed.fares())\n gtfs_feed.write('fare_rules.txt', gtfs_feed.fare_rules())\n\n gtfs_feed.make_zip('output.zip', files=files, clone=gtfs_file)\n shutil.move('output.zip', gtfs_file)\n\n util.delete_temp_files(files)", "def do_pack():\n try:\n now = time.strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p ./versions')\n local('tar -cvzf versions/web_static_{}.tgz web_static'.format(now))\n return(\"versions/web_static_{}.tgz\".format(now))\n except:\n return None", "def _build_file_tree(self):\n # Build file tree with packmode and weigth info (# of file in the packmode)\n root = {\"packmode\": None, \"weight\": None, \"children\": {}}\n for filepath, packmode in self.override_packmode_map.items():\n node = root\n for part in filepath:\n node = node[\"children\"].setdefault(\n part, {\"packmode\": None, \"weight\": None, \"children\": {}}\n )\n node[\"weight\"] = 1\n node[\"packmode\"] = packmode\n return root", "def load(fp, slot_key=_DEFAULT_SLOT):\n\n with Pack(fp) as mlio_pack:\n return mlio_pack.load(slot_key)", "def generate_package_from_binary_filepath(input_path, options = None):\n import hashlib\n # create MD5\n blocksize = 65536\n fd = open(input_path, \"rb\")\n hasher = hashlib.md5()\n buf = fd.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = fd.read(blocksize)\n \n return generate_package_from_md5(hasher.hexdigest())", "def init_add_package_to_stack_process(stack, name, version, file_name):\n with settings(host_string=host, key_filename=key_filename):\n s = Stack(MAGIC_DOMAIN, stack, meta_path=\"/var/gachette/\", operator=StackOperatorRedis(redis_host=dd.REDIS_HOST))\n 
s.add_package(name, version=version, file_name=file_name)\n send_notification(\"stack #%s package %s (%s) added\" % (stack, name, version))", "def DoPack(options, args):\n client = GClient.LoadCurrentConfig(options)\n if not client:\n raise gclient_utils.Error(\"client not configured; see 'gclient config'\")\n if options.verbose:\n # Print out the .gclient file. This is longer than if we just printed the\n # client dict, but more legible, and it might contain helpful comments.\n print(client.ConfigContent())\n return client.RunOnDeps('pack', args)", "def do_pack():\n now = datetime.datetime.now()\n path = 'versions/web_static_' +\\\n '{}{}{}{}{}{}'.format(now.year, now.month,\n now.day, now.hour,\n now.minute, now.second) + '.tgz'\n\n local('mkdir -p versions')\n success = local('tar -cvzf {:s} web_static'.format(path), capture=True)\n if success.return_code == 0:\n return path", "def assemble(f):\r\n input_file = Path(f)\r\n if input_file.suffix != '.asm':\r\n raise Exception('Expected .asm file')\r\n\r\n result = parse_data(input_file.read_text())\r\n input_file.with_suffix('.hack').write_text(result)", "def test_5_recv_env(self):\n\n f = fmri.PkgFmri(self.published[3], None)\n\n os.environ[\"PKG_SRC\"] = self.durl1\n os.environ[\"PKG_DEST\"] = self.tempdir\n\n # First, retrieve the package.\n self.pkgrecv(command=\"--raw {0}\".format(f))\n\n # Next, load the manifest.\n basedir = os.path.join(self.tempdir, f.get_dir_path())\n mpath = os.path.join(basedir, \"manifest\")\n\n m = manifest.Manifest()\n raw = open(mpath, \"rb\").read()\n m.set_content(raw)\n\n # This is also the format pkgsend will expect for correct\n # republishing.\n ofile = file(os.devnull, \"rb\")\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n ifile = file(os.path.join(basedir, a.hash),\n \"rb\")\n\n # Since the file shouldn't be compressed, this\n # should return a zlib.error.\n self.assertRaises(zlib.error,\n misc.gunzip_from_stream, ifile, ofile,\n ignore_hash=True)\n\n for var in (\"PKG_SRC\", \"PKG_DEST\"):\n del os.environ[var]", "def test_2_fusion(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"test_fusion\"),\n os.path.join(data_dir, \"run_info-fusion.yaml\")]\n subprocess.check_call(cl)", "def ReadDataPack(input_file):\n with open(input_file, \"rb\") as file:\n data = file.read()\n original_data = data\n\n # Read the header.\n version, num_entries, encoding = struct.unpack(\"<IIB\", data[:HEADER_LENGTH])\n if version != PACK_FILE_VERSION:\n print \"Wrong file version in \", input_file\n raise WrongFileVersion\n\n resources = {}\n if num_entries == 0:\n return DataPackContents(resources, encoding)\n\n # Read the index and data.\n data = data[HEADER_LENGTH:]\n kIndexEntrySize = 2 + 4 # Each entry is a uint16 and a uint32.\n for _ in range(num_entries):\n id, offset = struct.unpack(\"<HI\", data[:kIndexEntrySize])\n data = data[kIndexEntrySize:]\n next_id, next_offset = struct.unpack(\"<HI\", data[:kIndexEntrySize])\n resources[id] = original_data[offset:next_offset]\n\n return DataPackContents(resources, encoding)", "def package():\n pass", "def pack(**kwargs):\n require('repository')\n #if env.repository.startswith('svn://'):\n if env.repository.type == 'svn':\n execute(svn.pack, **kwargs)\n if env.repository.type == 'git':\n execute(git.pack, **kwargs)\n else:\n abort('Unsupported repository type %s' % 
env.repository)", "def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()", "def unpackage():\n\n zipfileLoc = hou.ui.selectFile(title=\"please select a zipFile created by the package function\", pattern=\"*.zip\")\n if not zipfileLoc: \n \n return\n \n file_ = zipfile.ZipFile(hou.expandString(zipfileLoc), \"r\")\n\n isOke = False\n \n for name in file_.namelist():\n \n if name.endswith(\".hip\") or name.endswith(\".hipnc\"):\n \n isOke = True\n break\n \n if not isOke: \n \n return\n \n unpackLoc = hou.expandString(hou.ui.selectFile(title=\"please select a directory you wish to use to unpack the files to.\"))\n \n if not unpackLoc or not os.path.isdir(unpackLoc): \n \n return\n \n unzip(file_, unpackLoc)\n unpackageDir = os.path.dirname(file_.namelist()[0])\n otlsfiles = glob.glob(os.path.join(unpackLoc, unpackageDir, \"otls\", \"*\"))\n hipfile = glob.glob(os.path.join(unpackLoc, unpackageDir, \"*.hip*\"))\n \n if len(hipfile) != 1: \n \n return\n \n hou.hipFile.load(hipfile[0])\n \n for otl in otlsfiles:\n\n hou.hda.installFile(otl)", "def import_fusion_archive(filename, name=\"import\"):\n import_options = app().importManager.createFusionArchiveImportOptions(filename)\n\n document = app().importManager.importToNewDocument(import_options)\n imported_root = document.products[0].rootComponent\n\n bodies = []\n\n for body in imported_root.bRepBodies:\n bodies.append(brep().copy(body))\n for occurrence in imported_root.allOccurrences:\n for body in occurrence.bRepBodies:\n bodies.append(brep().copy(body))\n\n document.close(saveChanges=False)\n\n return BRepComponent(*bodies, name=name)", "def test_unpack(self):\n if not os.path.isfile(akrr_tar_gz):\n raise Exception(\"Should do test_packager first\")\n \n if os.path.exists(cfg.akrr_home):\n shutil.rmtree(cfg.akrr_home)\n \n if verbosity>=3: print \"\\n\"+\"~\"*80\n \n #start bash shell\n bash = self.getBash()\n \n output=bash.runcmd('tar -xvf {akrr_tar_gz} -C {above_akrr_home}'.format(akrr_tar_gz=akrr_tar_gz,above_akrr_home=os.path.abspath(os.path.join(cfg.akrr_home, \"..\"))),printOutput=True)\n output=bash.runcmd('export AKRR_HOME={akrr_home}'.format(akrr_home=cfg.akrr_home),printOutput=True)\n output=bash.runcmd('cd $AKRR_HOME',printOutput=True)\n output=bash.runcmd('pwd',printOutput=True)\n \n if verbosity>=3: print \"~\"*80\n #test some files presence\n filesToCheck=['src/akrr.py',\n 'src/akrrscheduler.py']\n for f in filesToCheck:\n self.assertEqual(os.path.isfile(os.path.abspath(os.path.join(cfg.akrr_home, f))), True, \"AKRR distribution archive can not be unpacked\")", "def _pack3(obj, fp, **options):\n global compatibility\n\n ext_handlers = options.get(\"ext_handlers\")\n\n if obj is None:\n _pack_nil(obj, fp, options)\n elif ext_handlers and obj.__class__ in ext_handlers:\n _pack_ext(ext_handlers[obj.__class__](obj), fp, options)\n elif isinstance(obj, bool):\n _pack_boolean(obj, fp, options)\n elif isinstance(obj, int):\n _pack_integer(obj, fp, options)\n elif isinstance(obj, float):\n _pack_float(obj, fp, options)\n elif compatibility and isinstance(obj, str):\n _pack_oldspec_raw(obj.encode('utf-8'), fp, options)\n elif compatibility and isinstance(obj, bytes):\n _pack_oldspec_raw(obj, fp, options)\n elif isinstance(obj, str):\n _pack_string(obj, fp, options)\n elif isinstance(obj, bytes):\n _pack_binary(obj, fp, options)\n elif isinstance(obj, list) or isinstance(obj, tuple):\n _pack_array(obj, fp, options)\n elif isinstance(obj, dict):\n _pack_map(obj, fp, options)\n elif isinstance(obj, 
datetime.datetime):\n _pack_ext_timestamp(obj, fp, options)\n elif isinstance(obj, Ext):\n _pack_ext(obj, fp, options)\n elif ext_handlers:\n # Linear search for superclass\n t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)\n if t:\n _pack_ext(ext_handlers[t](obj), fp, options)\n else:\n raise UnsupportedTypeException(\n \"unsupported type: %s\" % str(type(obj)))\n else:\n raise UnsupportedTypeException(\n \"unsupported type: %s\" % str(type(obj)))" ]
[ "0.73205775", "0.65307117", "0.6463891", "0.64161015", "0.63932323", "0.60954076", "0.60115385", "0.6011121", "0.5907196", "0.57618356", "0.57582206", "0.57581705", "0.57459545", "0.5699288", "0.5696721", "0.56722516", "0.56167424", "0.56135565", "0.55333644", "0.55249065", "0.5480161", "0.54660547", "0.54642814", "0.54370123", "0.5430938", "0.5373096", "0.5364959", "0.53441405", "0.53363436", "0.53288686", "0.5319184", "0.53123504", "0.52851903", "0.52840465", "0.52838546", "0.5262437", "0.5245852", "0.52418727", "0.52211326", "0.5218071", "0.5215326", "0.5202973", "0.5196808", "0.5196161", "0.5189013", "0.51819944", "0.5177259", "0.51765573", "0.5156004", "0.51446706", "0.51384133", "0.51269174", "0.5126337", "0.5125516", "0.5121398", "0.51084334", "0.51030344", "0.5100441", "0.5098815", "0.5094701", "0.50946826", "0.50786495", "0.5073463", "0.50680107", "0.506176", "0.505886", "0.50582117", "0.50563294", "0.50423086", "0.50421286", "0.5032484", "0.5026823", "0.501031", "0.49985677", "0.49929816", "0.4983884", "0.49773014", "0.4973888", "0.49605423", "0.49553367", "0.4953608", "0.49487987", "0.4947442", "0.49412805", "0.49387634", "0.4936831", "0.49276155", "0.49254638", "0.49241823", "0.49197477", "0.49196166", "0.49159527", "0.49154517", "0.49129316", "0.4904513", "0.49017566", "0.48992565", "0.48948863", "0.48944217", "0.4880892" ]
0.63310814
5
get the image info structure. Set default scale to 1.0. The other fields are 0 for numbers, or blank for strings
def get_image_info_struct(nimage, path_len,
                          image_id_len=None,
                          wcs_len=None,
                          ext_len=None,
                          extra_dtype=None):
    dt = get_image_info_dtype(
        path_len,
        image_id_len=image_id_len,
        wcs_len=wcs_len,
        ext_len=ext_len,
        extra_dtype=extra_dtype,
    )

    data = np.zeros(nimage, dtype=dt)
    data['scale'] = 1.0

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def image_info(self):\n\n if not self._image_info:\n path_image_info = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.ImageInfo\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_info):\n self.logger.warning(\"ImageInfo path doesn't exist: %s\", path_image_info)\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_info)\n self._image_info = pinn_to_dict(path_image_info)\n\n return self._image_info", "def get_img_info(self, idx):\n\n image = self.get_img(idx)\n img_height = image.size[0]\n img_width = image.size[1]\n\n return {\"height\": img_height, \"width\": img_width}", "def get_image_format_for_scale(scale=1.0):\n img = read_single(1, -70, 0, scale)\n return img.shape", "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def get_image_size(self):", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def __init__(self, data, pixscale = 7.77/43):\n self.data = data\n self.pixscale = pixscale", "def getimage(self):", "def _get_image_info(\n image_id: int,\n width: int,\n height: int,\n file_name: str,\n license_id=1,\n flickr_url=\"\",\n coco_url=\"\",\n date_captured=datetime.datetime.utcnow().isoformat(' ')):\n image_info = {\n \"id\": image_id,\n \"width\": width,\n \"height\": height,\n \"file_name\": file_name,\n \"license\": license_id,\n \"flickr_url\": flickr_url,\n \"coco_url\": coco_url,\n \"date_captured\": date_captured,\n }\n\n return image_info", "def get_image(self):\n if self._image is None:\n image_data = np.load(self.image_file)\n if not isinstance(image_data, np.ndarray):\n image_data = image_data['arr_0']\n self.meta_data = ImageWrapper.load_metadata(self.image_file+\".meta\")\n exposure_time = self.meta_data['exposure_time_us'] * 1e-6\n dark_level = float(self.meta_data['black_level'])\n # saturation_mask = image_data.max(axis=2) >= 4094\n image_data = np.clip((image_data.astype(np.float32) - dark_level),\n a_min=0.0, a_max=None) / exposure_time\n if self.original_vignetting is not None:\n image_data = image_data / self.original_vignetting\n if self.crop is not None:\n image_data = image_data[\n self.crop[1,0]:self.crop[1,1],\n self.crop[0,0]:self.crop[0,1]\n ]\n # saturation_mask = 
saturation_mask[\n # self.crop[1,0]:self.crop[1,1],\n # self.crop[0,0]:self.crop[0,1]\n # ]\n if self.down_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=1./self.down_sample,\n fy=1./self.down_sample,\n interpolation=cv2.INTER_AREA\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=1./self.down_sample,\n # fy=1./self.down_sample,\n # interpolation=cv2.INTER_AREA\n # )\n if self.reup_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=self.reup_sample,\n fy=self.reup_sample,\n interpolation=cv2.INTER_CUBIC\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=self.reup_sample,\n # fy=self.reup_sample,\n # interpolation=cv2.INTER_CUBIC\n # )\n image = torch.tensor(np.transpose(image_data, (2,0,1)), dtype=torch.float32, device=self.device)\n # saturation_mask = torch.tensor(saturation_mask, dtype=torch.float32, device=self.device)\n if not self.lazy:\n self._image = image\n # self._saturation_mask = saturation_mask\n else:\n image = self._image\n # saturation_mask = self._saturation_mask\n\n return image#, saturation_mask", "def GetScale(self):\n ...", "def format_img(self, img, C):\n img, ratio = self.format_img_size(img, C)\n img = self.format_img_channels(img, C)\n return img, ratio", "def __attrs_post_init__(self):\n self.key = uuid.uuid4().hex\n if self.properties is None:\n self.properties = {}\n if self.is_image:\n try:\n img_size = Image.open(self.open()).size\n self.properties.update(width=img_size[0], height=img_size[1])\n except IOError:\n self.content_type = 'application/octet-stream'", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def get_image_characteristics(self):\r\n self.image_height, self.image_width, self.image_channels = self.image.shape\r\n\r\n # Estimate the cell size to be around a ninth of the width of the screenshot area\r\n self.cell_size = int(self.image_width / 9) | 1\r\n\r\n # Cell size should be at most a ninth of the width and at least a twentieth of the width of the screenshot\r\n # Since a typical grid is 9x9, so it should be at most a ninth of the image width, and it shouldn't be too small\r\n self.min_cell_size = int(self.image_width / 20 * self.image_width / 20)\r\n self.max_cell_size = int(self.image_width / 9 * self.image_width / 9)", "def small_image(self):\n pass", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def _createImageInfo(self, \n height,\n width, \n img_id=None, \n license=0, \n flickr_url='',\n coco_url='', \n date_captured=None,\n other=None,\n ):\n\n if date_captured is None:\n date_captured = datetime.datetime.utcnow().isoformat(' ')\n\n if img_id is not None:\n filename = self.imId2name(img_id)\n else:\n filename = None\n\n img_info={\"id\" : img_id,\n \"width\" : width,\n \"height\" : height,\n \"file_name\" : filename,\n \"license\" : license,\n \"flickr_url\" : flickr_url,\n \"coco_url\" : coco_url,\n \"date_captured\" : date_captured,\n \"other\": other\n }\n\n return img_info", "def getImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n in_dict = {}\n in_dict[\"name\"] = img.name\n in_dict[\"b64str\"] = img.b64str\n in_dict[\"imgsize\"] = img.imgsize\n in_dict[\"processed\"] = 
img.processed\n in_dict[\"timestamp\"] = img.timestamp\n return in_dict", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def set_attributes(self):\n\n pil_image = PILImage.open(self.path)\n\n # Get the exif data\n # Thanks https://gist.github.com/erans/983821\n exif_data = {}\n info = pil_image._getexif()\n if info:\n for tag, value in info.items():\n decoded = PILExifTags.TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = PILExifTags.GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n\n gps_latitude = exif_data.get(\"GPSInfo\",{}).get(\"GPSLatitude\")\n gps_latitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLatitudeRef')\n gps_longitude = exif_data.get(\"GPSInfo\",{}).get('GPSLongitude')\n gps_longitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLongitudeRef')\n gps_altitude = exif_data.get(\"GPSInfo\",{}).get('GPSAltitude')\n gps_altitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSAltitudeRef')\n gps_direction = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirection')\n gps_direction_ref = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirectionRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and 
gps_longitude_ref:\n lat = gps_tag_to_decimal_degress(gps_latitude)\n if gps_latitude_ref != \"N\": \n lat = 0 - lat\n\n lon = gps_tag_to_decimal_degress(gps_longitude)\n if gps_longitude_ref != \"E\":\n lon = 0 - lon\n\n # image attributes\n self.width, self.height = pil_image.size\n # exif attributes\n self.lat, self.lon = lat, lon\n self.focal = float(exif_data[\"FocalLengthIn35mmFilm\"])\n self.timestamp = datetime.datetime.strptime(exif_data[\"DateTimeOriginal\"], \"%Y:%m:%d %H:%M:%S\").timestamp()\n self.altitude = gps_altitude[0] / gps_altitude[1]\n self.direction = float(gps_direction) if gps_direction is not None else None\n self.pixel_size = (self.altitude * 35.0 / self.focal) / float(self.width)\n # transform attributes\n self.point = self.drone_map.reproject(lon,lat)\n self.angle = float(gps_direction) if gps_direction is not None else 0\n self.scale = 1.0", "def get_picture_info(instance, preset_name):\n # Bail out if the picture does not have an image as that's the object we use to get\n # all the information we need to return any picture info.\n if not instance.picture:\n return None\n\n thumbnailer = instance.picture.easy_thumbnails_thumbnailer\n\n # Look for the preset in settings and fallback to \"default\"\n preset = SIMPLEPICTURE_PRESETS.get(preset_name, SIMPLEPICTURE_PRESETS[\"default\"])\n\n # Complete picture information with thumbnails url calculated according to what is\n # defined in the preset\n picture_info = {}\n location_dict = {\"subject_location\": instance.picture.subject_location}\n\n # - src\n options = preset[\"src\"].copy()\n options.update(location_dict)\n picture_info[\"src\"] = thumbnailer.get_thumbnail(options).url\n\n # - srcset\n srcset = []\n for info in preset.get(\"srcset\", []):\n options = info[\"options\"].copy()\n options.update(location_dict)\n url = thumbnailer.get_thumbnail(options).url\n srcset.append(f\"{url:s} {info['descriptor']:s}\")\n picture_info[\"srcset\"] = \", \".join(srcset) if srcset else None\n\n # - sizes\n picture_info[\"sizes\"] = preset.get(\"sizes\")\n\n return picture_info", "def _getAttributes(self):\n self._params = {}\n if self.interp is not None:\n # Initialize interpolation function :\n self['x'] = np.arange(0, self.pixels, 1)\n self['y'] = np.arange(0, self.pixels, 1)\n # Define newaxis :\n self['xnew'] = np.arange(0, self.pixels, self.interp)\n self['ynew'] = np.arange(0, self.pixels, self.interp)\n self['csize'] = len(self['xnew'])\n else:\n self['csize'] = self.pixels\n # Variables :\n l = int(self['csize'] / 2)\n self['l'] = l\n y, x = np.ogrid[-l:l, -l:l]\n disc = x**2 + y**2\n self['mask'] = disc < l**2\n self['nmask'] = np.invert(self['mask'])\n # self['image'] = np.tile(self.bgcolor[np.newaxis, ...], (2*l, 2*l, 1))", "def __bobo_traverse__(self, REQUEST, name):\n if name.startswith('image'):\n field = self.getField('image')\n image = None\n if name == 'image':\n image = field.getScale(self)\n else:\n scalename = name[len('image_'):]\n if scalename in field.getAvailableSizes(self):\n image = field.getScale(self, scale=scalename)\n if image is not None and not isinstance(image, basestring):\n # image might be None or '' for empty images\n return image\n\n return base.ATCTContent.__bobo_traverse__(self, REQUEST, name)", "def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n 
nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def get_img_data(data_type, file_info, img_info, **kwargs):\n if file_info['ext']=='fits':\n hdulist = get_file(file_info)\n data = hdulist[int(img_info['frame'])].data\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img = get_file(file_info)\n data = np.array(img)\n \n if data_type == 'data':\n if 'scale' in kwargs:\n width = int(kwargs['width']/2/img_info['viewer']['scale'])\n height = int(kwargs['height']/2/img_info['viewer']['scale'])\n else:\n width = int(kwargs['width']/2)\n height = int(kwargs['height']/2)\n x0 = max(0, kwargs['x']-width)\n y0 = max(0, kwargs['y']-height)\n xf = min(data.shape[1], kwargs['x']+width)\n yf = min(data.shape[0], kwargs['y']+height)\n if 'scale' in kwargs:\n tile_data = {\n 'x0_idx': x0,\n 'y0_idx': y0,\n 'xf_idx': xf,\n 'yf_idx': yf\n }\n data = scale_data(file_info, img_info, tile_data, data)\n else:\n data = data[y0:yf, x0:xf]\n response = {\n 'id': 'data',\n 'min': float(data.min()),\n 'max': float(data.max()),\n 'mean': float(data.mean()),\n 'median': float(np.median(data)),\n 'std_dev': float(np.std(data)),\n 'data': data.tolist()\n }\n elif data_type == 'datapoint':\n if (kwargs['x']<data.shape[1] and kwargs['y']<data.shape[0] and\n kwargs['x']>=0 and kwargs['y']>=0):\n response = {\n 'id': 'datapoint',\n 'px_value': float(data[kwargs['y'],kwargs['x']])\n }\n else:\n response = {\n 'id': 'datapoint',\n 'px_value': 0\n }\n else:\n raise ToyzJobError(\"Loading that data type has not been implemented yet\")\n return response", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def generate_image_info(im, params):\n 
im = ee.Image(im)\n\n # some images are scaled to a factor of 10.\n if params.get('scale') == 'log':\n im = im.log10()\n\n im = im.sldStyle(params.get('sld_style'))\n\n m = im.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def __bobo_traverse__(self, REQUEST, name):\n if name.startswith('image'):\n field = self.getField('image')\n image = None\n if name == 'image':\n image = field.getScale(self)\n else:\n scalename = name[len('image_'):]\n scalename.replace(\".jpg\", \"\")\n if scalename in field.getAvailableSizes(self):\n image = field.getScale(self, scale=scalename)\n if image is not None and not isinstance(image, basestring):\n # image might be None or '' for empty images\n return image\n\n return base.ATCTContent.__bobo_traverse__(self, REQUEST, name)", "def transform(data):\n if 'name' not in data or 'engine' not in data:\n return None\n return Image(\n name=data['name'],\n engine=data['engine'],\n id=data['id'] if 'id' in data else '',\n parameters=data['parameters'] if 'parameters' in data else {},\n capabilities=data['capabilities'] if 'capabilities' in data else {},\n node=data['node'] if 'node' in data else {}\n )", "def extractScale(self,groups):\n self.scaleX = float(groups[0])\n self.scaleY = float(groups[0])\n if len(groups) == 2 and groups[1]:\n self.scaleY = float(groups[1])\n self.matrix = [[self.scaleX, 0.0, 0.0], \\\n [0.0, self.scaleY, 0.0]]", "def miscinfo(self):\n return _image.image_miscinfo(self)", "def image(self):\n return self._image", "def get_image_attributes(self, element):", "def imgProp(img):\n\td = {}\n\td[\"shape\"] = img.shape\n\td[\"rows\"] = img.shape[0]\n\td[\"columns\"] = img.shape[1]\n\tif len(img.shape) is 3:\n\t\td[\"channels\"] = img.shape[2]\n\td[\"size\"] = img.size\n\td[\"dtype\"] = img.dtype\n\treturn d", "def __bobo_traverse__(self, REQUEST, name):\n if name.startswith('image'):\n field = self.getField('image')\n image = None\n if name == 'image':\n image = field.getScale(self)\n else:\n scalename = name[len('image_'):]\n scalename.replace(\".jpg\", \"\")\n if scalename in field.getAvailableSizes(self):\n image = field.getScale(self, scale=scalename)\n if image is not None and not isinstance(image, basestring):\n # image might be None or '' for empty images\n return image\n\n return ATCTContent.__bobo_traverse__(self, REQUEST, name)", "def format_img_size(self, img, C):\n img_min_side = float(C.im_size)\n (height,width,_) = img.shape\n\n if width <= height:\n ratio = img_min_side/width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side/height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n return img, ratio", "def _scale_to_zero_one(img):\n if img.dtype == np.uint8:\n img = img.astype(np.float32)\n return np.multiply(img, 1.0 / 255.0)\n else:\n print(\"image values already seem to be float\")\n return img", "def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n 
\"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass", "def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible('image'):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get('image', None)\n if image_conf:\n scaleconf = image_conf['imgsize']\n # Scale string is something like: 'mini 200:200'.\n # We need the name only: 'mini'.\n scale = scaleconf.split(' ')[0]\n scales = ploneapi.content.get(path='@@images')\n return scales.scale('image', scale)", "def set_pic_size(self, im_name):\n im_vals = np.genfromtxt(im_name, delimiter=self.delim)\n self.pic_width = int(np.size(im_vals[0]) - 1) # the first column of ASCII image is row number\n try: self.pic_height = int(np.size(im_vals[:,0])) \n except IndexError: \n self.pic_width = int(np.size(im_vals) - 1)\n self.pic_height = 1\n self.create_rect_mask()\n return self.pic_width, self.pic_height", "def adjust_image_resolution(data):\n\n output_large = cStringIO.StringIO()\n output_default = cStringIO.StringIO()\n output_tiny = cStringIO.StringIO()\n \n try:\n im0 = Image.open(cStringIO.StringIO(data))\n im0.thumbnail((1280, 1280), Image.ANTIALIAS)\n im0.save(output_large, 'JPEG')\n\n im1 = Image.open(cStringIO.StringIO(data))\n im1.thumbnail((1024, 1024), Image.ANTIALIAS)\n # could run entropy check to see if GIF makes more sense given an item.\n im1.save(output_default, 'JPEG')\n \n im2 = Image.open(cStringIO.StringIO(data))\n im2.thumbnail((120, 120), Image.ANTIALIAS)\n im2.save(output_tiny, 'JPEG')\n except IOError:\n return None\n \n return {\"large\" : output_large.getvalue(),\n \"default\" : output_default.getvalue(),\n \"tiny\" : output_tiny.getvalue()}", "def thumbnail(self, item):\n if self._has_image_field(item) and self._field_is_visible(\"image\"):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get(\"image\", None)\n if image_conf:\n scaleconf = image_conf[\"imgsize\"]\n # scale string is something like: 'mini 200:200' and\n # we need the name only: 'mini'\n if scaleconf == \"_original\":\n scale = None\n else:\n scale = scaleconf.split(\" \")[0]\n scales = item.restrictedTraverse(\"@@images\")\n return scales.scale(\"image\", scale)", "def get_scale():\r\n\r\n \r\n return 0.5", "def _get_image_properties(image):\n immin, immax = np.min(image), np.max(image)\n imtype = image.dtype.type\n try:\n lo, hi = dtype_range[imtype]\n except KeyError:\n lo, hi = immin, immax\n\n signed = immin < 0\n out_of_range_float = (np.issubdtype(image.dtype, np.floating) and\n (immin < lo or immax > hi))\n unsupported_dtype = image.dtype not in _supported_types\n\n return ImageProperties(signed, out_of_range_float, unsupported_dtype)", "def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale", "def get_image_info(path):\n try:\n image = Image.open(path)\n except IOError:\n logger.error(f\"'{path}' is not an image\")\n return\n\n if image.format != \"JPEG\":\n logger.error(f\"'{path}' is not a JPEG\")\n return\n\n info = {\n \"filename\": path,\n \"width\": image.width,\n \"height\": image.height,\n \"fileSize\": os.path.getsize(path),\n \"md5\": md5sum_file(path),\n }\n return info", "def generate_image_info(image):\n image = ee.Image(image)\n image_vis = image.visualize(**{\n 'min': image_min,\n 'max': image_max,\n 'palette': image_palette\n })\n\n print(image_min, image_max)\n\n if 'hillshade' in r and r['hillshade']:\n image_vis = 
hillshade(image_vis,\n image.subtract(image_min).divide(ee.Image.constant(image_max).subtract(image_min)),\n True)\n\n m = image_vis.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def setmiscinfo(self, info):\n return _image.image_setmiscinfo(self, info)", "def create_img_object(file, scale):\n with Raw(filename=file) as raw:\n img = Image.open(io.BytesIO(raw.thumbnail_to_buffer())).convert('RGBA')\n resize_dims = list(map(int, (i * scale for i in img.size)))\n img = img.resize(resize_dims)\n return img", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def image_info(path):\n global working_img\n working_img = Image.open(path)\n print('=======================================')\n print(f'이미지 파일 이름:{working_img.filename}')\n print(f'이미지 파일 파일 형식:{working_img.format}')\n print(f'이미지 용량:{working_img.size}')\n print(f'이미지 색상모드:{working_img.mode}')\n print(f'이미지 크기:{working_img.width}x{working_img.height}')", "def get_image(imagename, nx = 0, ny = 1, nz = 1, im = 0):\n\tif type(imagename) == type(\"\"):\n\t e = EMData()\n\t e.read_image(imagename, im)\n\telif not imagename:\n\t e = EMData()\n\t if (nx > 0): e.set_size(nx, ny, nz)\n\telse:\n\t e = imagename\n\treturn e", "def __init__(self, img, settings):\r\n self.img_orig = img\r\n self.settings = settings", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def settings(self):\n return dict(img_size=self.img_size, interpolation=self.interpolation)", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r", "def imageScale(scale):\n\t\treturn max(1, int(scale * (InterfaceTools.getCanvasSize()[0] / height)))", "def retrieveImageInfo(self, filename):\t\t \n\t\tassert filename, \"Filename must be defined\"\n\t\tassert os.path.exists(filename), \"File that we're retrieving information \\\n\t\t\t\t\t\t\t\t\t\tfrom (%s) needs to exist, but doesn't.\" % filename\n\t\tself.ext = filename.split(\".\")[-1].lower()\n\t\trdr = self.getReaderByExtension(self.ext)\n\t\t\n\t\tif self.ext == \"bmp\":\n\t\t\trdr.Allow8BitBMPOn()\n\t\trdr.SetFileName(filename)\n\t\tif rdr.IsA(\"vtkExtTIFFReader\"):\n\t\t\trdr.UpdateInformation()\n\t\t\tif rdr.GetNumberOfScalarComponents() == 
1:\n\t\t\t\trdr.RawModeOn()\n\n\t\tdata = rdr.GetOutput()\n\t\tdata.Update()\n\t\tself.numberOfComponents = data.GetNumberOfScalarComponents()\n\n\t\tif not self.ctf:\n\t\t\tbd = self.getDataBitDepth(data)\n\t\t\tself.ctf = vtk.vtkColorTransferFunction()\n\t\t\tif bd == 8 or bd == 12:\n\t\t\t\tself.ctf.AddRGBPoint(0, 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint((2 ** bd) - 1, 0, 1, 0)\n\t\t\telse:\n\t\t\t\trange = data.GetScalarRange()\n\t\t\t\tself.ctf.AddRGBPoint(range[0], 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint(range[1], 0, 1, 0)\n\t\t\t\n\t\tself.x, self.y, z = data.GetDimensions()\n\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\tif z > 1:\n\t\t\tself.slicesPerTimepoint = z\n\t\t\tself.z = z\n\t\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\t\tlib.messenger.send(self, \"update_dimensions\")\n\t\tself.originalDimensions = self.dimensions", "def image_data_format():\n return _IMAGE_DATA_FORMAT", "def __call__(self, results):\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple([int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, 'scale and scale_factor cannot be both set.'\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_cbboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n\n return results", "def __call__(self, results):\n\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple(\n [int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, (\n 'scale and scale_factor cannot be both set.')\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n return results", "def __call__(self, results):\n\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple(\n [int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, (\n 'scale and scale_factor cannot be both set.')\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n return results", "def get_entry_dict(self):\n\n # generating thumbnail URLs is slow, so only generate the ones\n # that will definitely be used.\n ret = {\n 'id': self.id,\n 'vertices': self.vertices,\n 'triangles': self.triangles,\n 'segments': self.segments,\n 'photo': self.photo.get_entry_dict(),\n }\n if self.dominant_rgb0:\n ret['dominant_rgb0'] = self.dominant_rgb0\n #if self.image_pbox:\n #ret['pbox'] = self.pbox\n #ret['image_pbox'] = {\n 
#'300': self.image_pbox_300.url,\n #'512': self.image_pbox_512.url,\n #'1024': self.image_pbox_1024.url,\n #'orig': self.image_pbox.url,\n #}\n if self.image_bbox:\n ret['image_bbox'] = {\n #'512': self.image_bbox_512.url,\n '1024': self.image_bbox_1024.url,\n #'orig': self.image_bbox.url,\n }\n return ret", "def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')", "def format_img(img, C):\n img, ratio = format_img_size(img, C)\n img = format_img_channels(img, C)\n return img, ratio", "def format_img(img, C):\n img, ratio = format_img_size(img, C)\n img = format_img_channels(img, C)\n return img, ratio", "def __init__(self):\n super(ISimpleITKImageMetric, self).__init__()\n self.metric = 'ISimpleITKImageMetric'\n self.ground_truth = None # SimpleITK.Image\n self.segmentation = None # SimpleITK.Image", "def get_image_and_prep(self,file_path):\r\n img = np.array(Image.open(file_path).convert('1'))\r\n img = img.reshape(28,28,1)\r\n return img", "def __init__(self,scale):\n self.scale = scale", "def value(self):\n return self._image._A if self._image else None", "def _read_empty(self):\n self.image_missing = True\n\n return_img = {}\n return_metadata = {}\n\n try:\n rows, cols = self.grid.subset_shape\n except AttributeError:\n rows, cols = self.grid.shape\n\n for param in self.parameters:\n data = np.full((rows, cols), np.nan)\n return_img[param] = data.flatten()\n return_metadata[param] = {'image_missing': 1}\n\n return return_img, return_metadata", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def getImage(self):\n return self.get('heightfield.image')", "def compose_image_meta(self, image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n\n meta = np.array([image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=class_num\n )\n return meta\n pass", "def load_image(default=True):\n if default:\n print(\"in heres\")\n return self.img\n else:\n img = Image.fromarray(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB))\n self.size = img.shape\n return img", "def __init__(self, filename):\n #read the image, scale it and save it\n image = imread(filename)\n self.image = image\n self.scaled = image / 255\n #check if it is in color or grayscale\n if self.scaled.shape[-1] == 3:\n #compute its brightess matrix by averaging the RGB values at each pixel\n self.brightness = self.scaled.mean(axis = 2)\n self.flat_brightness = np.ravel(self.brightness)\n else:\n self.flat_brightness = np.ravel(self.scaled)", "def compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned 
NoneType. This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def get_tile_info(file_info, img_info):\n all_tiles = []\n new_tiles = {}\n if img_info['invert_x']:\n xmin = img_info['width']*img_info['scale'] - img_info['viewer']['right']\n xmax = img_info['width']*img_info['scale'] - img_info['viewer']['left']\n else:\n xmin = img_info['viewer']['left']\n xmax = img_info['viewer']['right']\n if img_info['invert_y']:\n ymin = img_info['height']*img_info['scale'] - img_info['viewer']['bottom']\n ymax = img_info['height']*img_info['scale'] - img_info['viewer']['top']\n else:\n ymin = img_info['viewer']['top']\n ymax = img_info['viewer']['bottom']\n minCol = int(max(1,math.floor(xmin/file_info['tile_width'])))-1\n maxCol=int(min(img_info['columns'],math.ceil(xmax/file_info['tile_width'])))\n minRow = int(max(1,math.floor(ymin/file_info['tile_height'])))-1\n maxRow = int(min(img_info['rows'],math.ceil(ymax/file_info['tile_height'])))\n \n block_width = int(math.ceil(file_info['tile_width']/img_info['scale']))\n block_height = int(math.ceil(file_info['tile_height']/img_info['scale']))\n \n for row in range(minRow,maxRow):\n y0 = row*file_info['tile_height']\n yf = (row+1)*file_info['tile_height']\n y0_idx = int(y0/img_info['scale'])\n yf_idx = min(y0_idx + block_height, img_info['height'])\n for col in range(minCol,maxCol):\n all_tiles.append(str(col)+','+str(row))\n tile_idx = str(col)+','+str(row)\n if (tile_idx not in img_info['tiles'] or \n 'loaded' not in img_info['tiles'][tile_idx] or\n not img_info['tiles'][tile_idx]['loaded']):\n x0 = col*file_info['tile_width']\n xf = 
(col+1)*file_info['tile_width']\n x0_idx = int(x0/img_info['scale'])\n xf_idx = min(x0_idx+block_width, img_info['width'])\n tile_width = int((xf_idx-x0_idx)*img_info['scale'])\n tile_height = int((yf_idx-y0_idx)*img_info['scale'])\n new_filepath = get_tile_filename(\n file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx)\n tile = {\n 'idx': tile_idx,\n 'left': x0,\n 'right': xf,\n 'top': y0,\n 'bottom': yf,\n 'y0_idx': y0_idx,\n 'yf_idx': yf_idx,\n 'x0_idx': x0_idx,\n 'xf_idx': xf_idx,\n 'new_filepath': new_filepath,\n 'loaded': False,\n 'row': row,\n 'col': col,\n 'x': col*file_info['tile_width'],\n 'y': row*file_info['tile_height'],\n 'width': tile_width,\n 'height': tile_height\n }\n if img_info['invert_y']:\n tile['top'] = yf\n tile['bottom'] = y0\n if img_info['invert_x']:\n tile['left'] = xf\n tile['right'] = x0\n new_tiles[tile_idx] = tile\n print('viewer:', img_info['viewer'])\n print('new tiles', new_tiles.keys())\n return all_tiles, new_tiles", "def get_full_img(self, scale=1 / 8, onehot=False):\n if self.obs_vision:\n full_img = self.get_full_obs_render(scale=scale)\n else:\n full_img = self.grid.encode(self, onehot=onehot)\n # NOTE: in case need to scale here instead of in above func call: return cv2.resize(full_img, (0, 0), fx=0.125, fy=0.125, interpolation=cv2.INTER_AREA)\n return full_img", "def __init__(self, img):\n self.img = img", "def reScaleLandsat(self,img):\n \n\t\tthermalBand = ee.List(['thermal'])\n\t\tthermal = ee.Image(img).select(thermalBand).multiply(10)\n \n\t\totherBands = ee.Image(img).bandNames().removeAll(thermalBand)\n\t\tscaled = ee.Image(img).select(otherBands).divide(0.0001)\n \n\t\timage = ee.Image(scaled.addBands(thermal)).int16()\n \n\t\treturn image.copyProperties(img)", "def populate_image_stats(self, image):\n ti = image\n image_data = ti.data\n if not ti.data:\n return ti\n ti.size = len(image_data)\n try:\n with connect(Blobby) as c:\n ti.shahash = c.get_data_bhash(image_data)\n except o.Exception, ex:\n raise o.Exception('oException getting shahash: %s' % ex.msg)\n except Exception, ex:\n raise o.Exception('Exception getting shahash: %s' % ex)\n\n try:\n b = StringIO(image_data)\n img = Image.open(b)\n except Exception, ex:\n raise o.Exception('Exception getting PIL img: %s' % ex)\n try:\n ti.xdim, ti.ydim = img.size\n except Exception, ex:\n raise o.Exception('Exception getting dimensions: %s' % ex)\n try:\n ti.vhash = str(average_hash(img))\n except Exception, ex:\n raise o.Exception('Exception getting vhash: %s' % ex)\n\n return ti", "def get_image(self, size, mode='normal', state='on'):\n raise NotImplementedError", "def __init__(self, image):\n self.image = image", "def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }", "def get_blank_img(self):\n if photos_settings.DEBUG:\n return self.get_placeholder_img()\n\n out = {\n 'blank': True,\n 'width': self.max_width,\n 'height': self.max_height,\n 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name),\n }\n return out", "def __call__(self, image, requiredWidth, requiredHeight):\n # Resize the image first\n if image.shape[0] != requiredHeight or image.shape[1] != 
requiredWidth:\n image = cv2.resize(image, (requiredWidth, requiredHeight))\n\n if self.rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Convert the image to a floating point value\n image = image.astype(np.float32)\n\n if self.scale is not None:\n image *= self.scale\n if self.bias is not None:\n image += self.bias\n\n return image", "def __getitem__(self, item):\n\n image = Image.open(self.imgs_path[item]).convert(\"RGB\")\n\n # Applying the transformations\n image = self.transform(image)\n\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n # print(self.labels[item])\n # print(self.extra_info[item])\n\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n\n return image, labels, extra_info, img_name", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n citation=_CITATION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=_CLASS_NAMES),\n }),\n homepage=_HOMEPAGE,\n supervised_keys=('image', 'label'),\n )", "def _scale(self, image):\n\n if image.GetWidth() != self._width or image.GetHeight()!= self._height:\n image.Rescale(self._width, self._height)\n \n return image", "def image_properties(self):\n return self._image_properties", "def load_image(self, image_id):\n# logger.info(\"image {}\".format(image_id))\n info = self.image_info[image_id]\n if info[\"image\"] is None:\n im = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n image = np.ones([info['height'], info['width'], 1], dtype=np.uint8)\n image[:,:,0] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,1] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,2] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n self.image_info[image_id][\"image\"] = image\n# logger.info(\"cached {}\".format(image_id))\n else:\n image = self.image_info[image_id][\"image\"]\n# logger.info(\"missed {}\".format(image_id))\n\n return image" ]
[ "0.647343", "0.6325045", "0.6177507", "0.61103743", "0.61006016", "0.60990447", "0.60865074", "0.6057824", "0.6057824", "0.6057824", "0.6057824", "0.6057824", "0.59974873", "0.5942097", "0.5923346", "0.58519304", "0.5841813", "0.5808897", "0.57983494", "0.5795056", "0.57798666", "0.57730556", "0.57640034", "0.5718838", "0.57116294", "0.5709371", "0.5697625", "0.5695958", "0.5695206", "0.5693608", "0.56825", "0.5659136", "0.5652897", "0.56239635", "0.56163716", "0.5606664", "0.55881965", "0.55710304", "0.5547381", "0.5540823", "0.55255926", "0.55250466", "0.55129445", "0.5505887", "0.54964083", "0.5490037", "0.5487466", "0.54797405", "0.54531956", "0.54394656", "0.5421063", "0.541754", "0.54073286", "0.5400346", "0.5394751", "0.5394534", "0.5391262", "0.5383564", "0.5375029", "0.53739375", "0.53723973", "0.5372129", "0.5371134", "0.53650486", "0.53644985", "0.53622925", "0.53573304", "0.53476757", "0.53476757", "0.5340022", "0.5338343", "0.53367746", "0.53367746", "0.53153086", "0.5309929", "0.5302338", "0.53008896", "0.5296656", "0.52892315", "0.5285005", "0.5284862", "0.5272399", "0.5265168", "0.525985", "0.5252844", "0.5241972", "0.5241019", "0.5228513", "0.5217348", "0.5215693", "0.5206696", "0.52042", "0.52033234", "0.52033114", "0.52029675", "0.51941794", "0.5192512", "0.5191742", "0.5186529", "0.51854026" ]
0.65033776
0
get the image_info dtype for the specified path string length and wcs string length
def get_image_info_dtype(path_len, image_id_len=None, wcs_len=None, ext_len=None, extra_dtype=None): path_fmt = 'U%d' % path_len if image_id_len is None: image_id_descr = 'i8' else: image_id_descr = 'U%d' % image_id_len if ext_len is not None: ext_descr = 'U%d' % ext_len else: ext_descr = 'i2' dt=[] for ctype in IMAGE_INFO_TYPES: path_name = '%s_path' % ctype ext_name = '%s_ext' % ctype dt += [ (path_name, path_fmt), (ext_name,ext_descr), ] dt += [ ('image_id', image_id_descr), ('image_flags', 'i8'), ('magzp', 'f4'), ('scale', 'f4'), ('position_offset','f8'), ] if wcs_len is not None: wcs_fmt = 'U%d' % wcs_len dt += [ ('wcs',wcs_fmt), ] if extra_dtype is not None: dt += extra_dtype return dt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n wcs_len=wcs_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n\n data = np.zeros(nimage, dtype=dt)\n\n data['scale'] = 1.0\n\n return data", "def get_dimensions(image, classname):\n start, ext = os.path.splitext(image)\n if ext == '.yuv':\n bitdepth = \"8\"\n res_split = start.split('x')\n width_split = res_split[0].split('_')\n width = width_split[-1]\n height_split = res_split[-1].split('_')\n m = res_split[-1].find(\"bit\")\n if res_split[-1][m - 2] == \"_\":\n depth = res_split[-1][m - 1]\n else:\n depth = res_split[-1][m - 2:m]\n height = height_split[0]\n elif classname == \"classE_exr\":\n size = os.path.basename(image).split('_')[2]\n try:\n dimension_cmd = [\"identify\", '-size', size, '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n else:\n try:\n dimension_cmd = [\"identify\", '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n return width, height, depth", "def getImageWidthHeight(path2img):\n from struct import unpack\n with open(path2img, 'rb') as f:\n metadata = f.read(25)\n return unpack('>LL', metadata[16:24])[::-1]", "def fast_get_image_size(raw_data):\n size = len(raw_data)\n data = raw_data[:25]\n input_io = io.BytesIO(data)\n if (size >= 10) and data[:6] in ('GIF87a', 'GIF89a'):\n # GIFs\n w, h = struct.unpack(\"<HH\", data[6:10])\n width = int(w)\n height = int(h)\n elif ((size >= 24) and data.startswith('\\211PNG\\r\\n\\032\\n')\n and (data[12:16] == 'IHDR')):\n # PNGs\n w, h = struct.unpack(\">LL\", data[16:24])\n width = int(w)\n height = int(h)\n elif (size >= 16) and data.startswith('\\211PNG\\r\\n\\032\\n'):\n # older PNGs?\n w, h = struct.unpack(\">LL\", data[8:16])\n width = int(w)\n height = int(h)\n elif (size >= 2) and data.startswith('\\377\\330'):\n # JPEG\n input_io.seek(0)\n input_io.read(2)\n b = input_io.read(1)\n try:\n w = ''\n h = ''\n while (b and ord(b) != 0xDA):\n while (ord(b) != 0xFF): b = input_io.read(1)\n while (ord(b) == 0xFF): b = input_io.read(1)\n if (ord(b) >= 0xC0 and ord(b) <= 0xC3):\n input_io.read(3)\n h, w = struct.unpack(\">HH\", input_io.read(4))\n break\n else:\n input_io.read(int(struct.unpack(\">H\", input_io.read(2))[0])-2)\n b = input_io.read(1)\n width = int(w)\n height = int(h)\n except Exception as e:\n #print 'get size error'\n return 0, 0\n else:\n # print \"Sorry, don't know how to get information from this file %s\" % file_path\n return 0, 0\n if width < 0 or height<0:\n return 0, 0\n else:\n return width, height", "def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt", "def get_image_size(path, width, type_name):\n fc = _os.path.getsize(path) / type_mapping[type_name].itemsize\n shape = [width, int(fc / width)]\n computed_size = shape[0] * shape[1] * type_mapping[type_name].itemsize\n measured_size = _os.path.getsize(path)\n return shape", "def get_metadata(filename):\n import ome_files\n \n reader = 
ome_files.OMETIFFReader()\n reader.set_id(filename)\n H, W, Z, T, C = reader.get_size_y(), reader.get_size_x(), reader.get_size_z(), reader.get_size_t(), reader.get_size_c()\n reader.close()\n return H, W, Z, T, C", "def fl_get_string_dimension(style, size, txtstr, strlng):\n _fl_get_string_dimension = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_string_dimension\",\\\n None, [cty.c_int, cty.c_int, xfdata.STRING, cty.c_int,\n cty.POINTER(cty.c_int), cty.POINTER(cty.c_int)],\\\n \"\"\"void fl_get_string_dimension(int fntstyle, int fntsize,\n const char * s, int len, int * width, int * height)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(style, xfdata.TEXTSTYLE_list)\n i_style = library.convert_to_intc(style)\n i_size = library.convert_to_intc(size)\n s_txtstr = library.convert_to_bytestrc(txtstr)\n i_strlng = library.convert_to_intc(strlng)\n i_width, ptr_width = library.make_intc_and_pointer()\n i_height, ptr_height = library.make_intc_and_pointer()\n library.keep_elem_refs(style, i_style, size, i_size, txtstr, s_txtstr, \\\n strlng, i_strlng, i_width, i_height, ptr_width, ptr_height)\n _fl_get_string_dimension(i_style, i_size, s_txtstr, i_strlng, \\\n ptr_width, ptr_height)\n return i_width.value, i_height.value", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def get_data_info(data_path):\n if data_path.endswith('.npz'):\n data = np.load(data_path)\n labels = data['labels'][...]\n example_img = data['images'][:data['ids'][1]]\n out = {'size': len(labels),\n 'num_classes': len(np.unique(labels)),\n 'is_segment': False\n }\n elif data_path.endswith('.csv') or data_path.endswith('.txt'): # list of datasets\n lst = pd.read_csv(data_path)\n base_dir = os.path.dirname(data_path)\n segment0 = os.path.join(base_dir, lst['path'].tolist()[0])\n data = np.load(segment0)\n example_img = data['images'][:data['ids'][1]]\n out = {'size': sum(lst['N'].tolist()),\n 'num_classes': int(lst['num_classes'][0]),\n 'is_segment': True\n }\n else:\n raise TypeError(\"Error! 
dataset not supported.\")\n vocab_size = max(example_img) + 1\n out['vocab_size'] = vocab_size\n out['SEP'] = vocab_size - 3\n out['SOS'] = vocab_size - 2\n out['EOS'] = vocab_size - 1\n return out", "def fetchInfo(self, path):\n\n\n img = self.getImageObject(path)\n\n if isinstance(img, ImageFile):\n return img.size\n else:\n return [img.width, img.height]", "def get_dtype_info(dtype: type[np.dtype]) -> np.iinfo | np.finfo:\n try:\n dtype_info = np.iinfo(dtype)\n except ValueError:\n dtype_info = np.finfo(dtype)\n return dtype_info", "def dims(filespec, verbose=False):\n with open(filespec, \"rb\") as f:\n if f.read(4) == b\"\\x76\\x2f\\x31\\x01\": # EXR magic number\n version = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n max_strlen = 256 if (version & 0x400) else 32\n got_channels = False\n got_dims = False\n while not (got_channels and got_dims):\n attr_name = _read_string_nul(f, max_strlen)\n _ = _read_string_nul(f, max_strlen) # attr_type\n attr_size = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n if attr_name == \"channels\":\n nchan = 0\n isfloat = False\n bitdepth = 16\n while not got_channels:\n name = _read_string_nul(f, max_strlen)\n if len(name) >= 1:\n dtype = np.frombuffer(f.read(16), dtype=\"<u4\")[0]\n isfloat = isfloat or (dtype > 0)\n bitdepth = max(bitdepth, 16 if dtype == 1 else 32)\n nchan += 1\n else:\n got_channels = True\n elif attr_name == \"dataWindow\":\n box = np.frombuffer(f.read(16), dtype=\"<i4\")\n xmin, ymin, xmax, ymax = box\n width = xmax - xmin + 1\n height = ymax - ymin + 1\n got_dims = True\n else:\n _ = f.seek(attr_size, 1)\n if verbose:\n print(f\"Reading file {filespec} \", end='')\n print(f\"(w={width}, h={height}, c={nchan}, bitdepth={bitdepth})\")\n return width, height, nchan, isfloat, bitdepth\n raise RuntimeError(f\"File {filespec} is not a valid EXR file.\")", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 
'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def get_image_format_from_datatext(self, datatext):\n image_format = \"VIRT\"\n temp = re.search('VX_DF_IMAGE_(.+?)\\]', datatext) #Obs. Needed to ecape the [ ]'s\n if temp:\n image_format = temp.group(1)\n return image_format", "def getImageInformation(file_path):\n if os.path.isdir(file_path) == False:\n file_dir = os.path.basename(file_path)\n file_name = os.path.splitext(file_dir)[0]\n file_format = os.path.splitext(file_path)[1]\n return file_name, file_format", "def getheader(filename):\n # read header and convert to string\n h = np.fromfile(filename, dtype='uint8', count=512)\n header = ''\n for s in h[h > 0]:\n header += chr(s)\n # start reading at 'datatype'\n hd = header[header.lower().find('datatype'):]\n hd = hd.split(':')[0].replace(',', ' ').split()\n # Types: uint8 int16 int32 float32\n typelist = ['u1', 'i2', 'i4', 'f4']\n # extract datatype\n try:\n dtype = typelist[int(hd[0].split('=')[1]) - 1]\n except:\n print(header)\n raise IOError('getheader: datatype invalid or missing')\n # extract endianness\n try:\n if hd[-1].split('=')[0].lower() != 'endian':\n raise IndexError()\n endian = hd[-1].split('=')[1]\n except IndexError:\n print(header)\n raise IOError('getheader: endianess missing.')\n if endian.lower() == 'l':\n dtype = '<' + dtype\n else:\n dtype = '>' + dtype\n # extract dims\n try:\n if hd[2].split('=')[0].lower() != 'dims':\n raise IndexError()\n dims = int(hd[2].split('=')[1])\n if dims not in [2, 3]:\n raise ValueError('Invalid dims=%i (must be 2 or 3)' % dims)\n except IndexError:\n print(header)\n raise IOError('getheader: dims invalid or missing.')\n try:\n if hd[3].split('=')[0].lower() != 'nx':\n raise IndexError()\n nx = int(hd[3].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nx invalid or missing.')\n try:\n if hd[4].split('=')[0].lower() != 'ny':\n raise IndexError()\n ny = int(hd[4].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: ny invalid or missing.')\n if dims == 3:\n try:\n if hd[5].split('=')[0].lower() != 'nt':\n raise IndexError()\n nt = int(hd[5].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nt invalid or missing.')\n shape = (nx, ny, nt)\n else:\n shape = (nx, ny)\n return [shape, dtype, header]", "def from_path(fname):\n def parse_header(lines):\n metadata = {}\n for ln in lines:\n if ln.startswith('#') or len(ln) < 2:\n continue\n match = re.match('(\\w+)\\s+([\\w\\s\\.]+)', ln)\n if not match:\n warnings.warn(\"warning: can't understand line: %s\" % ln)\n continue\n key, value = match.group(1).lower(), match.group(2)\n if key == 'version':\n metadata[key] = value\n elif key in ('fields', 'type'):\n metadata[key] = value.split()\n elif key in ('size', 'count'):\n metadata[key] = map(int, value.split())\n elif key in ('width', 'height', 'points'):\n metadata[key] = int(value)\n elif key == 'viewpoint':\n metadata[key] = map(float, 
value.split())\n elif key == 'data':\n metadata[key] = value.strip().lower()\n # TO-DO apparently count is not required?\n # add some reasonable defaults\n if 'count' not in metadata:\n metadata['count'] = [1] * len(metadata['fields'])\n if 'viewpoint' not in metadata:\n metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]\n if 'version' not in metadata:\n metadata['version'] = '.7'\n return metadata\n\n def _build_dtype(metadata_):\n \"\"\" build numpy structured array dtype from pcl metadata.\n note that fields with count > 1 are 'flattened' by creating multiple\n single-count fields.\n TO-DO: allow 'proper' multi-count fields.\n \"\"\"\n fieldnames = []\n typenames = []\n numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),\n (np.dtype('float64'), ('F', 8)),\n (np.dtype('uint8'), ('U', 1)),\n (np.dtype('uint16'), ('U', 2)),\n (np.dtype('uint32'), ('U', 4)),\n (np.dtype('uint64'), ('U', 8)),\n (np.dtype('int16'), ('I', 2)),\n (np.dtype('int32'), ('I', 4)),\n (np.dtype('int64'), ('I', 8))]\n pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)\n\n for f, c, t, s in zip(metadata_['fields'],\n metadata_['count'],\n metadata_['type'],\n metadata_['size']):\n np_type = pcd_type_to_numpy_type[(t, s)]\n if c == 1:\n fieldnames.append(f)\n typenames.append(np_type)\n else:\n fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])\n typenames.extend([np_type] * c)\n dtype = np.dtype(zip(fieldnames, typenames))\n return dtype\n\n def parse_binary_pc_data(f, dtype, metadata):\n rowstep = metadata['points'] * dtype.itemsize\n # for some reason pcl adds empty space at the end of files\n buf = f.read(rowstep)\n return np.fromstring(buf, dtype=dtype)\n\n def parse_binary_compressed_pc_data(f, dtype, metadata):\n # compressed size of data (uint32)\n # uncompressed size of data (uint32)\n # compressed data\n # junk\n fmt = 'II'\n compressed_size, uncompressed_size = struct.unpack(fmt, f.read(struct.calcsize(fmt)))\n compressed_data = f.read(compressed_size)\n # (compressed > uncompressed)\n # should we read buf as raw binary?\n buf = lzf.decompress(compressed_data, uncompressed_size)\n if len(buf) != uncompressed_size:\n raise Exception('Error decompressing data')\n # the data is stored field-by-field\n pcs_data = np.zeros(metadata['width'], dtype=dtype)\n ix = 0\n for dti in range(len(dtype)):\n dt = dtype[dti]\n bytess = dt.itemsize * metadata['width']\n column = np.fromstring(buf[ix:(ix + bytess)], dt)\n pcs_data[dtype.names[dti]] = column\n ix += bytess\n return pcs_data\n\n with open(fname, 'rb') as f:\n header = []\n while True:\n ln = f.readline().strip()\n header.append(ln)\n if ln.startswith('DATA'):\n metadata = parse_header(header)\n dtype = _build_dtype(metadata)\n break\n if metadata['data'] == 'ascii':\n pc_data = np.loadtxt(f, dtype=dtype, delimiter=' ')\n pc_data.dtype = np.float32\n pc_data = pc_data.reshape(-1, 4)\n elif metadata['data'] == 'binary':\n pc_data = parse_binary_pc_data(f, dtype, metadata)\n elif metadata['data'] == 'binary_compressed':\n pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)\n else:\n print('File->py_pcd.py: DATA field is not \"ascii\",maybe \"binary\" or \"binary_compressed\", try to add method for both')\n return 'CODE: 0x123'\n pc = point_cloud(metadata, pc_data)\n return pc", "def getImageSize(language=None):", "def _determine_dtype(fields):\n # Check whether the required fields are there\n for field in _NRRD_REQUIRED_FIELDS:\n if field not in fields:\n raise NrrdError('Nrrd header misses required field: 
\"%s\".' % (field))\n\n # Process the data type\n np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]\n # Endianness is not necessary for ASCII encoding type\n if np.dtype(np_typestring).itemsize > 1 and fields['encoding'] not in ['ascii', 'text', 'txt']:\n if 'endian' not in fields:\n raise NrrdError('Nrrd header misses required field: \"endian\".')\n if fields['endian'] == 'big':\n np_typestring = '>' + np_typestring\n elif fields['endian'] == 'little':\n np_typestring = '<' + np_typestring\n\n return np.dtype(np_typestring)", "def getimagesize(filename):\n img = Image.open(filename)\n (w,h) = img.size\n t = \"IMAGETYPE_%S\" % img.format\n a = \"width=\\\"%d\\\" height=\\\"%d\\\"\" % img.size\n return (w,h,t,a)", "def read_med_image(file_path, dtype):\n img_stk = sitk.ReadImage(file_path)\n img_np = sitk.GetArrayFromImage(img_stk)\n img_np = img_np.astype(dtype)\n return img_np, img_stk", "def getImageDescriptor(im):\n bb = '\\x2C' # Image separator,\n bb += intToBin( 0 ) # Left position\n bb += intToBin( 0 ) # Top position\n bb += intToBin( im.size[0] ) # image width\n bb += intToBin( im.size[1] ) # image height\n bb += '\\x87' # packed field : local color table flag1, interlace0, sorted table0, reserved00, lct size111=7=2^(7+1)=256.\n # LZW minimum size code now comes later, begining of [image data] blocks\n return bb", "def filename_type(filename):\n import re\n\n nii_re = re.compile(\".+(nii.gz)$|.+(nii)$\")\n npy_re = re.compile(\".+(npy)$|.+(npz)$\")\n\n\n if len(nii_re.findall(filename)):\n return 'nii'\n elif len(npy_re.findall(filename)):\n return 'npy'\n return None", "def get_image_size(self):", "def _build_dtype(metadata_):\n fieldnames = []\n typenames = []\n numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),\n (np.dtype('float64'), ('F', 8)),\n (np.dtype('uint8'), ('U', 1)),\n (np.dtype('uint16'), ('U', 2)),\n (np.dtype('uint32'), ('U', 4)),\n (np.dtype('uint64'), ('U', 8)),\n (np.dtype('int16'), ('I', 2)),\n (np.dtype('int32'), ('I', 4)),\n (np.dtype('int64'), ('I', 8))]\n pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)\n\n for f, c, t, s in zip(metadata_['fields'],\n metadata_['count'],\n metadata_['type'],\n metadata_['size']):\n np_type = pcd_type_to_numpy_type[(t, s)]\n if c == 1:\n fieldnames.append(f)\n typenames.append(np_type)\n else:\n fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])\n typenames.extend([np_type] * c)\n dtype = np.dtype(zip(fieldnames, typenames))\n return dtype", "def _image_data(buff):\n code = buff.getvalue()\n m = _size(code)\n if m:\n size = int(m.group(1))\n else:\n raise Exception('Internal error: PPM header not found')\n return code[m.end():], size", "def get_vtk_image_attrib(image):\n data = vtk_image_to_numpy(image)\n return (data.shape, data.dtype)", "def getTiffInfo(path):\n # py 2/3 comp\n first_file = glob.glob(os.path.join(path, '*.tif'))[0]\n if ScanImageTiffReader is not None and ScanImageTiffReader(first_file).metadata() != '':\n string = ScanImageTiffReader(first_file).metadata()\n else:\n tfh = tifffile.TiffFile(first_file)\n # If software key is in dict tags --> SI2016\n if 'software' in tfh.pages[0].tags:\n string = tfh.pages[0].tags['software'].value.decode('utf-8')\n else:\n string = tfh.pages[0].tags['image_description'].value.decode('utf-8')\n string = \" \".join(string.split()).replace('\\\\', ' ')\n string = string.replace(')', '')\n string = string.replace('(', '')\n return string", "def type_info(np_type):\n dt = np.dtype(np_type)\n np_type = dt.type\n width = 
dt.itemsize\n try: # integer type\n info = np.iinfo(dt)\n except ValueError:\n pass\n else:\n return dict(min=np_type(info.min), max=np_type(info.max), minexp=None,\n maxexp=None, nmant=None, nexp=None, width=width)\n info = np.finfo(dt)\n # Trust the standard IEEE types\n nmant, nexp = info.nmant, info.nexp\n ret = dict(min=np_type(info.min),\n max=np_type(info.max),\n nmant=nmant,\n nexp=nexp,\n minexp=info.minexp,\n maxexp=info.maxexp,\n width=width)\n if np_type in (_float16, np.float32, np.float64,\n np.complex64, np.complex128):\n return ret\n info_64 = np.finfo(np.float64)\n if dt.kind == 'c':\n assert np_type is np.longcomplex\n vals = (nmant, nexp, width / 2)\n else:\n assert np_type is np.longdouble\n vals = (nmant, nexp, width)\n if vals in ((112, 15, 16), # binary128\n (info_64.nmant, info_64.nexp, 8), # float64\n (63, 15, 12), (63, 15, 16)): # Intel extended 80\n return ret # these are OK without modification\n # The remaining types are longdoubles with bad finfo values. Some we\n # correct, others we wait to hear of errors.\n # We start with float64 as basis\n ret = type_info(np.float64)\n if vals in ((52, 15, 12), # windows float96\n (52, 15, 16)): # windows float128?\n # On windows 32 bit at least, float96 is Intel 80 storage but operating\n # at float64 precision. The finfo values give nexp == 15 (as for intel\n # 80) but in calculations nexp in fact appears to be 11 as for float64\n ret.update(dict(width=width))\n return ret\n if vals == (105, 11, 16): # correctly detected double double\n ret.update(dict(nmant=nmant, nexp=nexp, width=width))\n return ret\n # Oh dear, we don't recognize the type information. Try some known types\n # and then give up. At this stage we're expecting exotic longdouble or\n # their complex equivalent.\n if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):\n raise FloatingError('We had not expected type %s' % np_type)\n if (vals == (1, 1, 16) and on_powerpc() and\n _check_maxexp(np.longdouble, 1024)):\n # double pair on PPC. The _check_nmant routine does not work for this\n # type, hence the powerpc platform check instead\n ret.update(dict(nmant=106, width=width))\n elif (_check_nmant(np.longdouble, 52) and\n _check_maxexp(np.longdouble, 11)):\n # Got float64 despite everything\n pass\n elif (_check_nmant(np.longdouble, 112) and\n _check_maxexp(np.longdouble, 16384)):\n # binary 128, but with some busted type information. 
np.longcomplex\n # seems to break here too, so we need to use np.longdouble and\n # complexify\n two = np.longdouble(2)\n # See: https://matthew-brett.github.io/pydagogue/floating_point.html\n max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383\n if np_type is np.longcomplex:\n max_val += 0j\n ret = dict(min=-max_val,\n max=max_val,\n nmant=112,\n nexp=15,\n minexp=-16382,\n maxexp=16384,\n width=width)\n else: # don't recognize the type\n raise FloatingError('We had not expected long double type %s '\n 'with info %s' % (np_type, info))\n return ret", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def get_dtype(path,nrows=10):\n if nrows is not None:\n train = pd.read_csv(path,nrows=nrows)\n else:\n train = pd.read_pickle(path.replace('.csv','.pkl'))\n col2dtype = OrderedDict()\n for col in train.columns:\n if train[col].dtype=='O':\n col2dtype[col] = 'str'\n elif train[col].dtype==np.int64:\n col2dtype[col] = 'int32'\n else:\n col2dtype[col] = 'float32'\n return col2dtype", "def image_info(path):\n global working_img\n working_img = Image.open(path)\n print('=======================================')\n print(f'이미지 파일 이름:{working_img.filename}')\n print(f'이미지 파일 파일 형식:{working_img.format}')\n print(f'이미지 용량:{working_img.size}')\n print(f'이미지 색상모드:{working_img.mode}')\n print(f'이미지 크기:{working_img.width}x{working_img.height}')", "def getDimension(data):\r\n # open image for reading in binary mode\r\n\r\n # read the 2 bytes\r\n a = data[163:165]\r\n\r\n # calculate height\r\n height = (a[0] << 8) + a[1]\r\n\r\n # next 2 bytes is width\r\n a = data[165:167]\r\n\r\n # calculate width\r\n width = (a[0] << 8) + a[1]\r\n\r\n return (width, height)", "def get_itk_data(path_or_image, verbose=False):\n\n if isinstance(path_or_image, str):\n image = get_itk_image(path_or_image)\n else:\n image = path_or_image\n\n arr = itk.GetArrayFromImage(image)\n shape = arr.shape\n spacing = image.GetSpacing()[::-1]\n data_type = arr.dtype\n\n if verbose:\n print '\\t image shape: ' + str(shape)\n print '\\t image spacing: ' + str(spacing)\n print '\\t image data type: ' + str(data_type)\n\n return arr, shape, spacing", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def datatype_from_extension(filename):\n mapping = {'slc': 1,\n 'slc_dec': 1,\n 'mli': 0,\n 'mli_dec': 0,\n 'inc': 0,\n 'dem_seg': 0,\n 'dem': 0,\n 'int': 1,\n 'sm': 1,\n 'cc': 0,\n \"sim_sar\": 0,\n \"ls_map\": 3,\n \"bmp\": 2}\n for i in [0, 1, 2, 3]:\n for j in [0, 1, 2, 3]:\n mapping[\"c{i}{j}\".format(i=i, j=j)] = 1 # covariance elements\n # Split the file name to the latest extesnsion\n extension = filename.split('.')[-1]\n return mapping[extension]", "def read_dimensions(self, path_meta, path_data):\n if path_meta.endswith('.mdd'): path_meta = path_meta.replace('.mdd', '')\n if path_data.endswith('.ddf'): path_data = path_data.replace('.ddf', '')\n self._meta, self._data = r_dimensions(path_meta+'.mdd', path_data+'.ddf')\n self._set_file_info(path_data, path_meta)\n if not self._dimensions_comp == 'ignore':\n d_comp = self._dimensions_comp\n 
self._meta['info']['dimensions_comp'] = d_comp\n self.set_dim_suffix()\n self.undimensionize()\n if d_comp is True: self.dimensionize()\n self._rename_blacklist_vars()\n return None", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def image_data_format():\n return _IMAGE_DATA_FORMAT", "def loadSize(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.RasterYSize, ds.RasterXSize", "def get_image_format_for_scale(scale=1.0):\n img = read_single(1, -70, 0, scale)\n return img.shape", "def _get_image_type_from_array(arr):\n if len(arr.shape) == 3 and arr.shape[2] == 3:\n # 8-bit x 3 colors\n return 'RGB'\n elif len(arr.shape) == 2:\n # 8-bit, gray-scale\n return 'L'\n else:\n raise ValueError(\n 'Input array must have either 2 dimensions or 3 dimensions where the '\n 'third dimension has 3 channels. i.e. arr.shape is (x,y) or (x,y,3). '\n 'Found shape {}.'.format(arr.shape))", "def test_rt_metadata():\n \n img = lena()\n\n textdata = \"This is some arbitrary metadata for a text field\"\n info = TiffImagePlugin.ImageFileDirectory()\n\n info[tag_ids['ImageJMetaDataByteCounts']] = len(textdata)\n info[tag_ids['ImageJMetaData']] = textdata\n\n f = tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n \n loaded = Image.open(f)\n\n assert_equal(loaded.tag[50838], (len(textdata),))\n assert_equal(loaded.tag[50839], textdata)", "def read_label_data(mode, image_type):\n return np.loadtxt(parse_path(mode, image_type, True), dtype=int, delimiter='\\n')", "def getImage(self, dtype=float64):\n fname = '%s::%s'%(self.__class__.__name__, self.getImage.__name__)\n if (not self.lhaveImage):\n print(\"%s: DSM image not yet computed\"%fname)\n return None\n ngrd = c_int(1)\n ierr = c_int(1)\n self.lib.xcloc_getNumberOfGridPointsInImage(ngrd, ierr)\n ngrd = ngrd.value\n if (ierr.value != 0):\n print(\"%s: Failed to get image size\"%fname)\n return None\n if (dtype == float64):\n grd = ascontiguousarray(zeros(ngrd, dtype=c_double))\n grdPtr = grd.ctypes.data_as(POINTER(c_double)) \n self.lib.xcloc_getImage64f(ngrd, grdPtr, ierr)\n else:\n grd = ascontiguousarray(zeros(ngrd, dtype=c_float))\n grdPtr = grd.ctypes.data_as(POINTER(c_float))\n self.lib.xcloc_getImage32f(ngrd, grdPtr, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to get image\"%fname)\n return None\n return grd", "def identify(path):\n\n if not os.path.exists(path):\n return {}\n\n out, _ = processutils.execute(\n 'qemu-img info %s' % path, shell=True)\n\n data = {}\n for line in out.split('\\n'):\n line = line.lstrip().rstrip()\n elems = line.split(': ')\n if len(elems) > 1:\n key = elems[0]\n value = ': '.join(elems[1:])\n\n m = VALUE_WITH_BRACKETS_RE.match(value)\n if m:\n value = float(m.group(1))\n\n elif value.endswith('K'):\n value = float(value[:-1]) * 1024\n elif value.endswith('M'):\n value = float(value[:-1]) * 1024 * 1024\n elif value.endswith('G'):\n value = float(value[:-1]) * 1024 * 1024 * 1024\n elif value.endswith('T'):\n value = float(value[:-1]) * 1024 * 1024 * 1024 * 1024\n\n try:\n data[key] = float(value)\n except Exception:\n data[key] = value\n\n return data", "def determineTypeBounds(data):\n folder= data[\"folder\"]\n msg = \"Initial 
upload\"\n if not data[\"zipdata\"]:\n msg=\"Data uploaded previously - Use force to remove and reload.\"\n type = None\n file = None\n bounds= None\n shapefiles = findfiles(['*.shp'],where=folder)\n if shapefiles:\n type=\"shapefile\"\n file = os.path.join(folder,shapefiles[0])\n bounds=geoBoundsMetadata(file)\n else:\n try:\n imgfiles=findfiles(['*.jpg','*.tif','*.tiff','*.png'],where=folder)\n if imgfiles:\n file = os.path.join(folder,imgfiles[0])\n bounds=geoBoundsMetadata(file,format=\"image\")\n type=\"image\"\n else:\n raise Exception(\"No suitable georeferenced or scanned image file found\")\n except:\n type=\"iiif\"\n bounds=None\n return {\"file\":file,\"folder\":folder,\"bounds\":bounds,\"type\":type,\"msg\":msg,\"zipurl\":data[\"zipurl\"]}", "def get_kernel_and_dilation_from_long_name(file):\n kernels, dilations = None, None\n if ('3333' in file) or ('k3' in file):\n kernels = [3, 3, 3, 3]\n elif '2222' in file:\n kernels = [2, 2, 2, 2]\n elif 'k_1111' in file:\n kernels = [1, 1, 1, 1]\n elif 'k_4444' in file:\n kernels = [4, 4, 4, 4]\n\n if 'dilations_1111' in file:\n dilations = [1, 1, 1, 1]\n elif 'dilations_24' in file:\n dilations = [2, 4, 8, 16]\n else:\n dilations = [3, 9, 27, 81]\n\n return kernels, dilations", "def get_field_size(type_str):\n return ULog._UNPACK_TYPES[type_str][1]", "def generate_image_info(path):\n file_types = ['*.png', '*.jpg', '*.gif']\n for file_type in file_types:\n for img_path in glob.glob(path + file_type):\n img = Image.open(img_path)\n img_name = img_path.split('/')[-1].split('.')[0]\n with open(path + 'resolution.txt', 'a') as file:\n file.write(img_name + ' ' + str(img.size[0]) +\n ' ' + str(img.size[1]) + '\\n')", "def get_dtype_and_shape(self, val):\n # get type of object as string\n val_type = str(type(val))\n matchObj = re.match(r\"<(type|class) '([^']+)'>\", val_type)\n if not matchObj:\n raise SystemError(\"** Error: Unable to find type in %s\" % val_type)\n val_type = matchObj.group(2)\n # check for \"value_info\" passed in through calling script (e.g. 
Matlab)\n # if so, then type and shape is given in val (it does not contain the actual data\n # to store.\n if val_type == 'str' and self.file.options['storage_method'] == 'none':\n # value_info string looks like the following:\n # value_info: type=\"float\", shape=\"[5]\" *OR*\n # value_info: type=\"float\", shape=\"[scalar]\"\n matchObj = re.match(r'^value_info: type=\"([^\"]+)\", shape=\"\\[([^\\]]+)\\]\"$', val)\n if matchObj:\n dtype = matchObj.group(1)\n shape = matchObj.group(2)\n if shape != 'scalar':\n # convert dimensions from string (like '4 5') to integer list\n shape = map(int, shape.split())\n return (dtype, shape)\n # check data shape and type \n if val_type in ('str', 'int', 'float', 'long', 'unicode', 'bool'):\n shape = \"scalar\"\n dtype = val_type\n elif val_type == 'list':\n # convert from list to np array to get shape\n a = np.array(val)\n shape = a.shape\n dtype = str(a.dtype)\n # print \"found list, dtype is %s, shape is:\" % dtype\n # pp.pprint (shape)\n elif 'numpy' in val_type or type(val) is h5py._hl.dataset.Dataset: \n shape = val.shape\n dtype = str(val.dtype)\n # print \"found numpy or h5py dataset, dtype is %s\", dtype\n else:\n print \"** Error, unable to determine shape of value assiged to dataset\"\n print \"value type is '%s'\" % val_type\n traceback.print_stack()\n sys.exit(1)\n return (dtype, shape)", "def get_data(view: sublime.View, path: str) -> 'Tuple[int, int, int, int, int]':\n\n # set max dimensions to 75% of the viewport\n max_width, max_height = view.viewport_extent()\n max_width *= 0.75\n max_height *= 0.75\n max_ratio = max_height / max_width\n\n try:\n real_width, real_height, size = get_image_size(path)\n except UnknownImageFormat:\n return -1, -1, -1, -1, -1\n\n # First check height since it's the smallest vector\n if real_height / real_width >= max_ratio and real_height > max_height:\n width = real_width * max_height / real_height\n height = max_height\n elif real_height / real_width <= max_ratio and real_width > max_width:\n width = max_width\n height = real_height * max_width / real_width\n else:\n width = real_width\n height = real_height\n\n return width, height, real_width, real_height, size", "def _get_dtype_len(in_dtype):\n\n temp_dtype = in_dtype.lower()\n\n if temp_dtype in (\"int8\", \"uint8\"):\n byte_len = 1\n elif temp_dtype in (\"float16\", \"int16\", \"uint16\"):\n byte_len = 2\n elif temp_dtype in (\"float32\", \"int32\", \"uint32\"):\n byte_len = 4\n elif temp_dtype in (\"int64\", \"uint64\"):\n byte_len = 8\n\n return byte_len", "def retrieve_geotiff_metadata(sFilename_geotiff_in):\n pDriver = gdal.GetDriverByName('GTiff')\n \n pDataset = gdal.Open(sFilename_geotiff_in, gdal.GA_ReadOnly)\n\n if pDataset is None:\n print(\"Couldn't open this file: \" + sFilename_geotiff_in)\n sys.exit(\"Try again!\")\n else: \n pProjection = pDataset.GetProjection()\n pSpatial_reference = osr.SpatialReference(wkt=pProjection) \n ncolumn = pDataset.RasterXSize\n nrow = pDataset.RasterYSize \n pGeotransform = pDataset.GetGeoTransform()\n dOriginX = pGeotransform[0]\n dOriginY = pGeotransform[3]\n dPixelWidth = pGeotransform[1]\n pPixelHeight = pGeotransform[5] \n return dPixelWidth, dOriginX, dOriginY, nrow, ncolumn, pSpatial_reference, pProjection, pGeotransform", "def get_ext(path):\n if os.path.exists(path):\n return imghdr.what(path)\n else:\n print(\"fichier n'existe pas\")", "def type(path):", "def _read(path, desired_size):\n \n dcm = pydicom.dcmread(path)\n\n slope, intercept = dcm.RescaleSlope, dcm.RescaleIntercept\n \n try:\n img 
= (dcm.pixel_array * slope + intercept)\n except:\n img = np.zeros(desired_size[:2])-1\n \n if img.shape != desired_size[:2]:\n img = cv2.resize(img, desired_size[:2], interpolation=cv2.INTER_LINEAR)\n \n img = _normalize(img)\n \n # return np.stack((img,)*3, axis=-1)\n return img", "def read_image_data(mode, image_type):\n txtdata = np.loadtxt(parse_path(mode, image_type, False), dtype=str, delimiter='\\n', comments=None)\n num_rows = image_type.rows\n num_images = len(txtdata) // num_rows\n return txtdata.reshape((num_images, num_rows))", "def _dtype(self):\n if self._dtype_ is not None:\n return self._dtype_\n dtype = None\n for raw_extra, filename in zip(self._raw_extras, self._filenames):\n for ent in raw_extra[\"ent\"]:\n if ent is not None:\n with _fiff_get_fid(filename) as fid:\n fid.seek(ent.pos, 0)\n tag = read_tag_info(fid)\n if tag is not None:\n if tag.type in (\n FIFF.FIFFT_COMPLEX_FLOAT,\n FIFF.FIFFT_COMPLEX_DOUBLE,\n ):\n dtype = np.complex128\n else:\n dtype = np.float64\n if dtype is not None:\n break\n if dtype is not None:\n break\n if dtype is None:\n raise RuntimeError(\"bug in reading\")\n self._dtype_ = dtype\n return dtype", "def _image_hdr(self, hdr):\n # Called ... in OpenMIMS\n d = {}\n d['header size'], d['type'], d['width'], d['height'], \\\n d['bytes per pixel'], d['masses'], d['planes'], \\\n d['raster'], d['original filename'] = \\\n unpack(self._bo + 'i 6h i 64s', hdr.read(84))\n\n # Called nickname in OpenMIMS\n d['original filename'] = self._cleanup_string(d['original filename'])\n if d['header size'] != 84:\n raise ValueError(\"Image header size is {}, not 84.\".format(d['header size']))\n return d", "def pngxy(data):\n ihdr = data.index(b'IHDR')\n # next 8 bytes are width/height\n w4h4 = data[ihdr+4:ihdr+12]\n return struct.unpack('>ii', w4h4)", "def get_image_size(fname):\r\n \r\n logging.debug('get_image_size({})'.format(fname))\r\n\r\n with open(fname, 'rb') as fhandle:\r\n head = fhandle.read(24)\r\n if len(head) != 24:\r\n return\r\n if imghdr.what(fname) == 'png':\r\n check = struct.unpack('>i', head[4:8])[0]\r\n if check != 0x0d0a1a0a:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n width, height = struct.unpack('>ii', head[16:24])\r\n elif imghdr.what(fname) == 'gif':\r\n width, height = struct.unpack('<HH', head[6:10])\r\n elif imghdr.what(fname) == 'jpeg':\r\n try:\r\n fhandle.seek(0) # Read 0xff next\r\n size = 2\r\n ftype = 0\r\n while not 0xc0 <= ftype <= 0xcf:\r\n fhandle.seek(size, 1)\r\n byte = fhandle.read(1)\r\n while ord(byte) == 0xff:\r\n byte = fhandle.read(1)\r\n ftype = ord(byte)\r\n size = struct.unpack('>H', fhandle.read(2))[0] - 2\r\n # We are at a SOFn block\r\n fhandle.seek(1, 1) # Skip `precision' byte.\r\n height, width = struct.unpack('>HH', fhandle.read(4))\r\n except Exception: #IGNORE:W0703\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n else:\r\n logging.debug('get_image_size - Stopping application with exit code \"2\"\\n')\r\n sys.exit(2)\r\n logging.debug('get_image_size - width, height = {}, {}'.format(width, height))\r\n return width, height", "def GetFormatType(format_):\n\n formattype = format_\n bitsize = 0\n if isinstance(format_, str):\n match = re.search(r'\\s*(\\D+)', format_)\n if match:\n formattype = match.group(0)\n bitsize = struct.calcsize(formattype) * 8\n return formattype, bitsize", "def get_image_info(path):\n try:\n image = Image.open(path)\n except IOError:\n logger.error(f\"'{path}' is 
not an image\")\n return\n\n if image.format != \"JPEG\":\n logger.error(f\"'{path}' is not a JPEG\")\n return\n\n info = {\n \"filename\": path,\n \"width\": image.width,\n \"height\": image.height,\n \"fileSize\": os.path.getsize(path),\n \"md5\": md5sum_file(path),\n }\n return info", "def get_dtype_sarray(name,\n colnames=None):\n \n def ConsistentTrees_ASCII_099():\n \"\"\"\n Property names and column in Consitent-Tree ascii (CT) files (after runing it on ROCKSTAR), original labels of properties in the file are commanded. \n Label names are converted to internal universal name convention. This is the bucket for version 0.99 of CT\n\n \n List of property labels in Rockstar ascii files: \n scale(0) id(1) desc_scale(2) desc_id(3) num_prog(4) pid(5) upid(6) desc_pid(7) phantom(8) sam_mvir(9) mvir(10) rvir(11) rs(12) vrms(13) mmp?(14)\n scale_of_last_MM(15) vmax(16) x(17) y(18) z(19) vx(20) vy(21) vz(22) Jx(23) Jy(24) Jz(25) Spin(26) Breadth_first_ID(27) Depth_first_ID(28) Tree_root_ID(29)\n Orig_halo_ID(30) Snap_num(31) Next_coprogenitor_depthfirst_ID(32) Last_progenitor_depthfirst_ID(33) Rs_Klypin Mvir_all M200b M200c M500c M2500c Xoff Voff\n Spin_Bullock b_to_a c_to_a A[x] A[y] A[z] b_to_a(500c) c_to_a(500c) A[x](500c) A[y](500c) A[z](500c) T/|U| M_pe_Behroozi M_pe_Diemer Halfmass_Radius\n \"\"\"\n dt= [('a' , np.float32), #scale(0)\n ('haloid_CT' , np.int64), #id(1)\n ('a_desc' , np.float32), #desc_scale(2)\n ('descID' , np.int64), #desc_id(3)\n ('n_prog' , np.int32), #num_prog(4)\n ('hostid_LM' , np.int64), #pid(5) \n ('hostid_MM' , np.int64), #upid(6)\n ('desc_hostid_MM' , np.int64), #desc_pid(7)\n ('is_phantom' , np.int32), #phantom(8) \n ('sam_mvir' , np.float32), #sam_mvir(9)\n ('mhalo' , np.float32), #mvir(10) \n ('rvir' , np.float32), #rvir(11)\n ('rscale' , np.float32), #rs(12)\n ('vrms' , np.float32), #vrms(13) \n ('is_mmp' , np.int32), #mmp?(14)\n ('a_lastMM' , np.float32), #scale_of_last_MM(15)\n ('vmax' , np.float32), #vmax(16)\n ('x_pos' , np.float32), #x(17)\n ('y_pos' , np.float32), #y(18)\n ('z_pos' , np.float32), #z(19)\n ('x_vel' , np.float32), #vx(20)\n ('y_vel' , np.float32), #vy(21)\n ('z_vel' , np.float32), #vz(22)\n ('x_ang' , np.float32), #Jx(23)\n ('y_ang' , np.float32), #Jy(24)\n ('z_ang' , np.float32), #Jz(25)\n ('spinParameter' , np.float32), #Spin(26)\n ('BFirstID' , np.int64), #Breadth_first_ID(27)\n ('DFirstID' , np.int64), #Depth_first_ID(28) \n ('rootIndex' , np.int64), #Tree_root_ID(29) \n ('haloid' , np.int64), #Orig_halo_ID(30)\n ('snapid' , np.int32), #Snap_num(31) \n ('NextCoDFirstID' , np.int64), #Next_coprogenitor_depthfirst_ID(32)\n ('LastDFirstID' , np.int64), #Last_progenitor_depthfirst_ID(33) \n ('rscale_Klypin' , np.float32), #rs_Klypin\n ('mhalo+unbound' , np.float32), #mvir_all \n ('mhalo_200b' , np.float32), #m200b \n ('mhalo_200c' , np.float32), #m200c\n ('mhalo_500c' , np.float32), #m500c\n ('mhalo_2500c' , np.float32), #m2500c\n ('x_off' , np.float32), #Xoff\n ('v_off' , np.float32), #Yoff\n ('spin_Bullock' , np.float32), #spin_bullock\n ('b_to_a' , np.float32), #b_to_a \n ('c_to_a' , np.float32), #c_to_a\n ('x_a' , np.float32), #A[x]\n ('y_a' , np.float32), #A[y]\n ('z_a' , np.float32), #A[z] \n ('b_to_a_500c' , np.float32), #b_to_a(500c)\n ('c_to_a_500c' , np.float32), #c_to_a(500c) \n ('x_a_500c' , np.float32), #A[x](500c) \n ('y_a_500c' , np.float32), #A[y](500c) \n ('z_a_500c' , np.float32), #A[z](500c)\n ('T_U' , np.float32), #T/|U|\n ('Mpseudo_Behroozi', np.float32), #M_pe_Behroozi\n ('Mpseudo_Diemer' , np.float32), 
#M_pe_Diemer\n ('rhalf_mass' , np.float32), #Halfmass_Radius \n ] \n return dt\n\n def ConsistentTrees_ASCII_101():\n \"\"\"\n Property names and column in Consitent-Tree ascii (CT) files (after runing it on ROCKSTAR), original labels of properties in the file are commanded. \n Label names are converted to internal universal name convention. This is the bucket for version 1.01 of CT. This version has the very import\n mainLeaf_depthFirstIDincluded\n\n \n List of property labels in Rockstar ascii files: \n scale(0) id(1) desc_scale(2) desc_id(3) num_prog(4) pid(5) upid(6) desc_pid(7) phantom(8) sam_mvir(9) mvir(10) rvir(11) rs(12) vrms(13) mmp?(14)\n scale_of_last_MM(15) vmax(16) x(17) y(18) z(19) vx(20) vy(21) vz(22) Jx(23) Jy(24) Jz(25) Spin(26) Breadth_first_ID(27) Depth_first_ID(28) Tree_root_ID(29)\n Orig_halo_ID(30) Snap_num(31) Next_coprogenitor_depthfirst_ID(32) Last_progenitor_depthfirst_ID(33) Rs_Klypin Mvir_all M200b M200c M500c M2500c Xoff Voff\n Spin_Bullock b_to_a c_to_a A[x] A[y] A[z] b_to_a(500c) c_to_a(500c) A[x](500c) A[y](500c) A[z](500c) T/|U| M_pe_Behroozi M_pe_Diemer Halfmass_Radius\n \n \n #scale(0) id(1) desc_scale(2) desc_id(3) num_prog(4) pid(5) upid(6) desc_pid(7) phantom(8) sam_Mvir(9) Mvir(10) Rvir(11) rs(12) vrms(13) mmp?(14)\n scale_of_last_MM(15) vmax(16) x(17) y(18) z(19) vx(20) vy(21) vz(22) Jx(23) Jy(24) Jz(25) Spin(26) Breadth_first_ID(27) Depth_first_ID(28) Tree_root_ID(29)\n Orig_halo_ID(30) Snap_idx(31) Next_coprogenitor_depthfirst_ID(32) Last_progenitor_depthfirst_ID(33) Last_mainleaf_depthfirst_ID(34) Tidal_Force(35) Tidal_ID(36) \n Rs_Klypin(37) Mvir_all(38) M200b(39) M200c(40) M500c(41) M2500c(42) Xoff(43) Voff(44) Spin_Bullock(45) b_to_a(46) c_to_a A[x](47) A[y](48) A[z](49) b_to_a(500c)(50) c_to_a(500c)(51) A[x](500c)(52) A[y](500c)(53) A[z](500c)(54) T/|U|(55) M_pe_Behroozi(56) M_pe_Diemer(57) Halfmass_Radius(58)\n \"\"\"\n dt= [('a' , np.float32), #scale(0)\n ('haloid_CT' , np.int64), #id(1)\n ('a_desc' , np.float32), #desc_scale(2)\n ('descID' , np.int64), #desc_id(3)\n ('n_prog' , np.int32), #num_prog(4)\n ('hostid_LM' , np.int64), #pid(5) \n ('hostid_MM' , np.int64), #upid(6)\n ('desc_hostid_MM' , np.int64), #desc_pid(7)\n ('is_phantom' , np.int32), #phantom(8) \n ('sam_mvir' , np.float32), #sam_mvir(9)\n ('mhalo' , np.float32), #mvir(10) \n ('rvir' , np.float32), #rvir(11)\n ('rscale' , np.float32), #rs(12)\n ('vrms' , np.float32), #vrms(13) \n ('is_mmp' , np.int32), #mmp?(14)\n ('a_lastMM' , np.float32), #scale_of_last_MM(15)\n ('vmax' , np.float32), #vmax(16)\n ('x_pos' , np.float32), #x(17)\n ('y_pos' , np.float32), #y(18)\n ('z_pos' , np.float32), #z(19)\n ('x_vel' , np.float32), #vx(20)\n ('y_vel' , np.float32), #vy(21)\n ('z_vel' , np.float32), #vz(22)\n ('x_ang' , np.float32), #Jx(23)\n ('y_ang' , np.float32), #Jy(24)\n ('z_ang' , np.float32), #Jz(25)\n ('spinParameter' , np.float32), #Spin(26)\n ('BFirstID' , np.int64), #Breadth_first_ID(27)\n ('DFirstID' , np.int64), #Depth_first_ID(28) \n ('rootIndex' , np.int64), #Tree_root_ID(29) \n ('haloid' , np.int64), #Orig_halo_ID(30)\n ('snapid' , np.int32), #Snap_num(31) \n ('NextCoDFirstID' , np.int64), #Next_coprogenitor_depthfirst_ID(32)\n ('LastDFirstID' , np.int64), #Last_progenitor_depthfirst_ID(33)\n ('LastMLDFirstID', np.int64),#Last_mainleaf_depthfirst_ID(34)\n ('tidalForce' , np.float32), #Tidal_Force\n ('tidalID' , np.int64), #Tidal_ID \n ('rscale_Klypin' , np.float32), #rs_Klypin\n ('mhalo+unbound' , np.float32), #mvir_all \n ('mhalo_200b' , np.float32), #m200b \n 
('mhalo_200c' , np.float32), #m200c\n ('mhalo_500c' , np.float32), #m500c\n ('mhalo_2500c' , np.float32), #m2500c\n ('x_off' , np.float32), #Xoff\n ('v_off' , np.float32), #Yoff\n ('spin_Bullock' , np.float32), #spin_bullock\n ('b_to_a' , np.float32), #b_to_a \n ('c_to_a' , np.float32), #c_to_a\n ('x_a' , np.float32), #A[x]\n ('y_a' , np.float32), #A[y]\n ('z_a' , np.float32), #A[z] \n ('b_to_a_500c' , np.float32), #b_to_a(500c)\n ('c_to_a_500c' , np.float32), #c_to_a(500c) \n ('x_a_500c' , np.float32), #A[x](500c) \n ('y_a_500c' , np.float32), #A[y](500c) \n ('z_a_500c' , np.float32), #A[z](500c)\n ('T_U' , np.float32), #T/|U|\n ('Mpseudo_Behroozi', np.float32), #M_pe_Behroozi\n ('Mpseudo_Diemer' , np.float32), #M_pe_Diemer\n ('rhalf_mass' , np.float32), #Halfmass_Radius \n ] \n return dt\n\n \n\n def ConsistentTrees_basic_ASCII():\n \"\"\"\n Property names and column in Consitent-Tree ascii files (after runing it on ROCKSTAR), original labels of properties in the file are commanded. \n Label names are converted to internal universal name convention.\n\n \n Basic list of property labels in ConsistentTree ascii files: \n # snapid(1) a(2) a_desc(3) a_lastMM(4) rootIndex(5) descID(6) DFirstID(7) LastDFirstID(8) LastMLDFirstID(9) haloid_CT(10) orignal_haloid_RS(11) '\n x_pos(12) y_pos(13) z_pos(14) mhalo(15) vmax(16) rhalf_mass(17) rvir(18) T_U(19) subTreeID(20)\n \"\"\"\n dt= [('snapid' , np.int32), #(1) haloid \n ('a' , np.float32), #(2) descIndex\n ('a_desc' , np.float32), #(3) rootIndex \n ('a_lastMM' , np.float32), #(4) predIndex\n ('rootIndex' , np.int64), #(5) Mvir [h-1Msun] \n ('descID' , np.int64), #(6) delta_Mvir [h-1Msun]\n ('DFirstID' , np.int64), #(7) Rvir [h-1kpc]\n ('LastDFirstID' , np.int64), #(8) delt_Rvir [h-1kpc] \n ('LastMLDFirstID' , np.int64), #(12) snapid\n ('haloid_CT' , np.int64), #(13) n_particles\n ('orignal_haloid_RS' , np.int64), #(14) n_progs\n ('x_pos' , np.float32), #(9) X [h-1Mpc]\n ('y_pos' , np.float32), #(10) Y [h-1Mpc]\n ('z_pos' , np.float32), #(11) Z [h-1Mpc]\n ('mhalo' , np.float32), #(9) X [h-1Mpc]\n ('vmax' , np.float32), #(10) Y [h-1Mpc]\n ('rhalf_mass' , np.float32), #(11) Z [h-1Mpc]\n ('rvir' , np.float32), #(9) X [h-1Mpc]\n ('T_U' , np.float32), #(10) Y [h-1Mpc]\n ('subTreeID' , np.int32), #(11) Z [h-1Mpc] \n ] \n return dt\n\n \n \n\n def stats_basic_bucket():\n\n dt = [\n ('mhalo1' , np.float32),\n ('delta_mhalo' , np.float32),\n ('delta_mhalo_perc' , np.float32), \n ('rvir1' , np.float32),\n ('delta_rvir' , np.float32),\n ('delta_rvir_perc' , np.float32), \n ('n_particles1' , np.float32),\n ('n_particles2' , np.float32),\n ('n_particles_shared' , np.float32),\n ('n_particles_shared_perc1' , np.float32), \n ('n_particles_shared_perc2' , np.float32),\n ('delta_x_pos_perc' , np.float32), \n ('delta_y_pos_perc' , np.float32), \n ('delta_z_pos_perc' , np.float32)\n ]\n \n return dt\n\n def stats_custom_bucket(): \n \n mytypes={0: ('snapid', np.int32),\n 1: ('z', np.float32),\n 2: ('a', np.float32),\n 3: ('n_count', np.int64)\n }\n offset=len(mytypes.keys())\n \n for i,prop in enumerate(colnames):\n log_info=ha_lib.get_property_dict()[prop]['output_prop_as']\n #print('i:', i, 'colname:', prop, 'key:', i+offset) \n mytypes.update({offset+i*6+0: (prop+'_'+log_info+'50', ha_lib.lookup_property(prop)['dtype']),\n offset+i*6+1: (prop+'_'+log_info+'10', ha_lib.lookup_property(prop)['dtype']),\n offset+i*6+2: (prop+'_'+log_info+'32', ha_lib.lookup_property(prop)['dtype']), \n offset+i*6+3: (prop+'_'+log_info+'68', ha_lib.lookup_property(prop)['dtype']),\n 
offset+i*6+4: (prop+'_'+log_info+'90', ha_lib.lookup_property(prop)['dtype']),\n offset+i*6+5: (prop+'_'+log_info+'MAD', ha_lib.lookup_property(prop)['dtype']),\n# offset+i*18+6: ('delta_'+prop+'_'+log_info+'50', np.float32),\n# offset+i*18+7: ('delta_'+prop+'_'+log_info+'10', np.float32),\n# offset+i*18+8: ('delta_'+prop+'_'+log_info+'32', np.float32),\n# offset+i*18+9: ('delta_'+prop+'_'+log_info+'68', np.float32),\n# offset+i*18+10: ('delta_'+prop+'_'+log_info+'90', np.float32),\n# offset+i*18+11: ('delta_'+prop+'_'+log_info+'MAD', np.float32),\n# offset+i*18+12: ('delta_'+prop+'_perc_50', np.float32),\n# offset+i*18+13: ('delta_'+prop+'_perc_10', np.float32),\n# offset+i*18+14: ('delta_'+prop+'_perc_32', np.float32),\n# offset+i*18+15: ('delta_'+prop+'_perc_68', np.float32),\n# offset+i*18+16: ('delta_'+prop+'_perc_90', np.float32),\n# offset+i*18+17: ('delta_'+prop+'_perc_MAD', np.float32),\n })\n\n #print(mytypes)\n #return np.dtype([(mytypes[k][0], mytypes[k][1]) for k in mytypes.keys()])\n \n return [(mytypes[k][0], mytypes[k][1]) for k in mytypes.keys()]\n\n def stats_no_particles_perc_bucket():\n\n dt = [\n ('mhalo1' , np.float32),\n ('delta_mhalo' , np.float32),\n ('delta_mhalo_perc' , np.float32), \n ('rvir1' , np.float32),\n ('delta_rvir' , np.float32),\n ('delta_rvir_perc' , np.float32),\n ('delta_x_pos_perc' , np.float32), \n ('delta_y_pos_perc' , np.float32), \n ('delta_z_pos_perc' , np.float32)\n ]\n \n return dt\n \n def stats_perc_bucket(colnames=[]):\n \n dt = [\n ('snapid' , np.int32), \n ('z' , np.float32), \n ('mhalo1_log50' , np.float32),\n ('mhalo1_log10' , np.float32),\n ('mhalo1_log32' , np.float32), \n ('mhalo1_log68' , np.float32),\n ('mhalo1_log90' , np.float32), \n ('delta_mhalo_log50' , np.float32),\n ('delta_mhalo_log10' , np.float32),\n ('delta_mhalo_log32' , np.float32), \n ('delta_mhalo_log68' , np.float32),\n ('delta_mhalo_log90' , np.float32), \n ('delta_mhalo_perc_50' , np.float32),\n ('delta_mhalo_perc_10' , np.float32),\n ('delta_mhalo_perc_32' , np.float32), \n ('delta_mhalo_perc_68' , np.float32),\n ('delta_mhalo_perc_90' , np.float32), \n ('rvir1_50' , np.float32),\n ('rvir1_10' , np.float32),\n ('rvir1_32' , np.float32), \n ('rvir1_68' , np.float32),\n ('rvir1_90' , np.float32), \n ('delta_rvir_50' , np.float32),\n ('delta_rvir_10' , np.float32),\n ('delta_rvir_32' , np.float32), \n ('delta_rvir_68' , np.float32),\n ('delta_rvir_90' , np.float32), \n ('delta_rvir_perc_50' , np.float32),\n ('delta_rvir_perc_10' , np.float32),\n ('delta_rvir_perc_32' , np.float32), \n ('delta_rvir_perc_68' , np.float32),\n ('delta_rvir_perc_90' , np.float32), \n ('n_particles1_50' , np.float32),\n ('n_particles1_10' , np.float32),\n ('n_particles1_32' , np.float32), \n ('n_particles1_68' , np.float32),\n ('n_particles1_90' , np.float32), \n ('n_particles2_50' , np.float32),\n ('n_particles2_10' , np.float32),\n ('n_particles2_32' , np.float32), \n ('n_particles2_68' , np.float32),\n ('n_particles2_90' , np.float32), \n ('n_particles_shared_50' , np.float32),\n ('n_particles_shared_10' , np.float32),\n ('n_particles_shared_32' , np.float32), \n ('n_particles_shared_68' , np.float32),\n ('n_particles_shared_90' , np.float32), \n ('n_particles_shared_perc1_50' , np.float32),\n ('n_particles_shared_perc1_10' , np.float32),\n ('n_particles_shared_perc1_32' , np.float32), \n ('n_particles_shared_perc1_68' , np.float32),\n ('n_particles_shared_perc1_90' , np.float32), \n ('n_particles_shared_perc2_50' , np.float32),\n ('n_particles_shared_perc2_10' , np.float32),\n 
('n_particles_shared_perc2_32' , np.float32), \n ('n_particles_shared_perc2_68' , np.float32),\n ('n_particles_shared_perc2_90' , np.float32), \n ('delta_x_pos_perc_50' , np.float32), \n ('delta_x_pos_perc_10' , np.float32),\n ('delta_x_pos_perc_32' , np.float32), \n ('delta_x_pos_perc_68' , np.float32),\n ('delta_x_pos_perc_90' , np.float32), \n ('delta_y_pos_perc_50' , np.float32), \n ('delta_y_pos_perc_10' , np.float32),\n ('delta_y_pos_perc_32' , np.float32), \n ('delta_y_pos_perc_68' , np.float32),\n ('delta_y_pos_perc_90' , np.float32), \n ('delta_z_pos_perc_50' , np.float32),\n ('delta_z_pos_perc_10' , np.float32),\n ('delta_z_pos_perc_32' , np.float32), \n ('delta_z_pos_perc_68' , np.float32),\n ('delta_z_pos_perc_90' , np.float32),\n ('n_count' , np.int64)\n ]\n \n return dt\n\n\n def merger_trees_ASCII():\n\n dt = [\n ('haloid1' , np.int64), \n ('haloid2' , np.int64), \n ('descIndex1' , np.int64), \n ('descIndex2' , np.int64), \n ('rootIndex' , np.int64),\n ('snapid1' , np.int32), \n ('snapid2' , np.int32), \n ('z1' , np.float32), \n ('z2' , np.float32), \n ('mhalo1' , np.float32),\n ('mhalo2' , np.float32),\n ('delta_mhalo' , np.float32),\n ('delta_mhalo_perc' , np.float32), \n ('rvir1' , np.float32),\n ('rvir2' , np.float32), \n ('delta_rvir', np.float32),\n ('delta_rvir_perc' , np.float32), \n ('n_particles1' , np.int64),\n ('n_particles2' , np.int64),\n ('n_particles_shared', np.float32),\n ('n_particles_shared_perc1', np.float32),\n ('n_particles_shared_perc2', np.float32), \n ('x_pos1' , np.float32),\n ('y_pos1' , np.float32), \n ('z_pos1' , np.float32),\n ('x_pos2' , np.float32),\n ('y_pos2' , np.float32), \n ('z_pos2' , np.float32), \n ('delta_x_pos_perc' , np.float32), \n ('delta_y_pos_perc' , np.float32), \n ('delta_z_pos_perc' , np.float32),\n ('subTreeID' , np.int64)\n ]\n \n return dt\n\n \n def ROCKSTAR_ASCII():\n \"\"\"\n Property names and column in Rockstar ascii files, original labels of properties in the file are commanded. 
\n Label names are converted to internal universal name convention.\n \n List of property labels in Rockstar ascii files: \n id num_p mvir mbound_vir rvir vmax rvmax vrms x y z vx vy vz Jx Jy Jz E Spin PosUncertainty VelUncertainty bulk_vx bulk_vy bulk_vz BulkVelUnc n_core\n m200b m200c m500c m2500c Xoff Voff spin_bullock b_to_a c_to_a A[x] A[y] A[z] b_to_a(500c) c_to_a(500c) A[x](500c) A[y](500c) A[z](500c) \n Rs Rs_Klypin T/|U| M_pe_Behroozi M_pe_Diemer Halfmass_Radius idx i_so i_ph num_cp mmetric\n \"\"\"\n dt= [\n ('haloid' , np.int64), #id\n ('n_particles' , np.int64), #num_p\n ('mhalo' , np.float32), #mvir \n ('mbasic' , np.float32), #mbound_vir \n ('rvir' , np.float32), #rvir\n ('vmax' , np.float32), #vmax\n ('rvmax' , np.float32), #rvmax\n ('vrms' , np.float32), #vrms \n ('x_pos' , np.float32), #x\n ('y_pos' , np.float32), #y\n ('z_pos' , np.float32), #z\n ('x_vel' , np.float32), #vx\n ('y_vel' , np.float32), #vy\n ('z_vel' , np.float32), #vz\n ('x_ang' , np.float32), #Jx\n ('y_ang' , np.float32), #Jy\n ('z_ang' , np.float32), #Jz\n ('engery' , np.float32), #E\n ('spinParameter' , np.float32), #Spin\n ('unc_pos' , np.float32), #PosUncertainty\n ('unc_vel' , np.float32), #VelUncertainty\n ('x_vel_bulk' , np.float32), #bulx_vx\n ('y_vel_bulk' , np.float32), #bulx_vy\n ('z_vel_bulk' , np.float32), #bulx_vz\n ('unc_vel_bulk' , np.float32), #BulkVelUnc\n ('n_core' , np.int64), #n_core\n ('mhalo_200b' , np.float32), #m200b \n ('mhalo_200c' , np.float32), #m200c\n ('mhalo_500c' , np.float32), #m500c\n ('mhalo_2500c' , np.float32), #m2500c\n ('x_off' , np.float32), #Xoff\n ('v_off' , np.float32), #Yoff\n ('spin_Bullock' , np.float32), #spin_bullock\n ('b_to_a' , np.float32), #b_to_a \n ('c_to_a' , np.float32), #c_to_a\n ('x_a' , np.float32), #A[x]\n ('y_a' , np.float32), #A[y]\n ('z_a' , np.float32), #A[z] \n ('b_to_a_500c' , np.float32), #b_to_a(500c)\n ('c_to_a_500c' , np.float32), #c_to_a(500c) \n ('x_a_500c' , np.float32), #A[x](500c) \n ('y_a_500c' , np.float32), #A[y](500c) \n ('z_a_500c' , np.float32), #A[z](500c)\n ('rscale' , np.float32), #rs \n ('rscale_Klypin' , np.float32), #rs_Klypin\n ('T_U' , np.float32), #T/|U|\n ('Mpseudo_Behroozi', np.float32), #M_pe_Behroozi\n ('Mpseudo_Diemer' , np.float32), #M_pe_Diemer\n ('rhalf_mass' , np.float32), #Halfmass_Radius\n ('idx' , np.int64), #internal debugging quantity\n ('i_so' , np.int64), #internal debugging quantity\n ('i_ph' , np.int64), #internal debugging quantity\n ('n_particle_child', np.int64), #internal debugging quantity\n ('mmetric' , np.float32) #internal debugging quantity \n ] \n return dt\n\n def ROCKSTAR_binary():\n \"\"\"Function returns two dtype structure one for the halo properties (dt) and on for header information (dt_halo_info --> number of halo, number of particles, particle mass,\n particle type (dark matter, gas, etc.), header size (header_size), bytes of information per halo (halo_struct_size), location of bytes where to start the reading from\n (bytes_to_header_info) \n \n halo structure of the binary dump from halo.h in ROCKSTAR (https://bitbucket.org/gfcstanford/rockstar/src/main/)\n \n struct halo {\n int64_t id;\n float pos[6], corevel[3], bulkvel[3];\n float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms,\n J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3],\n b_to_a2, c_to_a2, A2[3],\n bullock_spin, kin_to_pot, m_pe_b, m_pe_d, halfmass_radius;\n int64_t num_p, num_child_particles, p_start, desc, flags, n_core;\n float min_pos_err, min_vel_err, min_bulkvel_err;\n };\n \n 
struct extra_halo_info {\n int64_t child, next_cochild, prev_cochild;\n int64_t sub_of, ph;\n float max_metric;\n };\n \"\"\"\n header_size = 256 #Bytes, size of the header\n halo_struct_size = 264 #Bytes, properties stored for one halo using dtype structure dt (260 from struct 'halo' in halo.h from ROCKSTAR and \n #4 bytes probably from max_metric from struct 'extra_halo_info' in halo.h)\n bytes_to_header_info = 64 #bytes until the header info starts\n \n dt_header_info = [ \n ('n_halos' , np.int64), #total number of halos in this file\n ('tot_n_particles' , np.int64), #total number of particles in this file \n ('box_size' , np.float32), #side lenght in Mpc/h of simulation box\n ('m_particles' , np.float32), #mass of one particle in h-1Msun\n ('type_particles' , np.int64) #type of particle (either 1=halo, star, gas etc.) \n ]\n \n dt = [\n ('haloid' , np.int64), #int64_t id\n ('x_pos' , np.float32), #float pos[6], 1\n ('y_pos' , np.float32), #float pos[6], 2\n ('z_pos' , np.float32), #float pos[6], 3\n ('pos4' , np.float32), #float pos[6], 4\n ('pos5' , np.float32), #float pos[6], 5\n ('pos6' , np.float32), #float pos[6], 6 \n ('x_corevel' , np.float32), #float corevel[3], 1\n ('y_corevel' , np.float32), #float corevel[3], 2\n ('z_corevel' , np.float32), #float corevel[3], 3 \n ('x_vel_bulk' , np.float32), #float bulkvel[3], 1\n ('y_vel_bulk' , np.float32), #float bulkvel[3], 2\n ('z_vel_bulk' , np.float32), #float bulkvel[3], 3\n ('mhalo' , np.float32), #float m \n ('rvir' , np.float32), #float r \n ('rvir_child' , np.float32), #float child_r\n ('vmax_r' , np.float32), #float vmax_r\n ('mhalo_bound' , np.float32), #float mgrav\n ('vmax' , np.float32), #float vmax\n ('vpeak' , np.float32), #float rvmax\n ('rscale' , np.float32), #float rs\n ('rscale_Klypin' , np.float32), #float klypin_rs\n ('vrms' , np.float32), #float vrms\n ('x_ang' , np.float32), #float J[3], 1\n ('y_ang' , np.float32), #float J[3], 2\n ('z_ang' , np.float32), #float J[3], 3\n ('energy' , np.float32), #float energy \n ('spinParameter' , np.float32), #float spin\n ('mhalo_200b' , np.float32), #float alt_m[4], 1 \n ('mhalo_200c' , np.float32), #float alt_m[4], 2 \n ('mhalo_500c' , np.float32), #float alt_m[4], 3 \n ('mhalo_2500c' , np.float32), #float alt_m[4], 4 \n ('x_off' , np.float32), #float Xoff\n ('v_off' , np.float32), #float Voff\n ('b_to_a' , np.float32), #float b_to_a \n ('c_to_a' , np.float32), #float c_to_a\n ('x_a' , np.float32), #float A[3], 1\n ('y_a' , np.float32), #float A[3], 2\n ('z_a' , np.float32), #float A[3], 3 \n ('b_to_a_500c' , np.float32), #float b_to_a2\n ('c_to_a_500c' , np.float32), #float c_to_a2\n ('x_a_500c' , np.float32), #float A2[3], 1 \n ('y_a_500c' , np.float32), #float A2[3], 2\n ('z_a_500c' , np.float32), #float A2[3], 3 \n ('spin_Bullock' , np.float32), #float bullock_spin\n ('T_U' , np.float32), #float kin_to_pot\n ('Mpseudo_Behroozi', np.float32), #float m_pe_b \n ('Mpseudo_Diemer' , np.float32), #float m_pe_d\n ('rhalf_mass' , np.float32), #float halfmass_radius\n ('n_particles' , np.int64), #int64_t num_p\n ('n_particles_child', np.int64), #int64_t num_child_particles \n ('p_start' , np.int64), #int64_t p_start\n ('descIndex' , np.int64), #int64_t desc\n ('flags' , np.int64), #int64_t flags\n ('n_core' , np.int64), #int64_t n_core\n ('PosUncertainty' , np.float32), #float min_pos_err\n ('VelUncertainty' , np.float32), #float min_vel_err\n ('BulkVelUnc' , np.float32), #float min_bulkvel_err\n ('mmetric' , np.float32) #unclear where it comes from, it might be mmetric \n ]\n \n 
return header_size, halo_struct_size, dt, dt_header_info, bytes_to_header_info\n\n def ROCKSTAR_ASCII_list():\n \"\"\"Property names and column in ROCKSTAR out_x.list files (where x stands for the snapshot number id), original labels of properties in the file are commanded. \n Label names are converted to internal universal name convention.\n \n In this file descendent information can befound (descIndex)!\n \n List of property labels in ROCKSTAR out_x.list files: \n ID DescID Mvir Vmax Vrms Rvir Rs Np X Y Z VX VY VZ JX JY JZ Spin rs_klypin Mvir_all M200b M200c M500c M2500c Xoff Voff spin_bullock\n b_to_a c_to_a A[x] A[y] A[z] b_to_a(500c) c_to_a(500c) A[x](500c) A[y](500c) A[z](500c) T/|U| M_pe_Behroozi M_pe_Diemer Halfmass_Radius\n \"\"\"\n \n dt= [\n ('haloid' , np.int64), #ID\n ('descIndex' , np.int64), #DescID\n ('mhalo' , np.float32), #Mvir\n ('vmax' , np.float32), #Vmax\n ('vrms' , np.float32), #Vrms \n ('rvir' , np.float32), #Rvir\n ('rscale' , np.float32), #Rs\n ('n_particles' , np.int64), #Np \n ('x_pos' , np.float32), #x\n ('y_pos' , np.float32), #y\n ('z_pos' , np.float32), #z\n ('x_vel' , np.float32), #vx\n ('y_vel' , np.float32), #vy\n ('z_vel' , np.float32), #vz\n ('x_ang' , np.float32), #Jx\n ('y_ang' , np.float32), #Jy\n ('z_ang' , np.float32), #Jz\n ('spinParameter' , np.float32), #Spin\n ('rscale_Klypin' , np.float32), #rs_Klypin \n ('mbasic' , np.float32), #Mvir_all\n ('mhalo_200b' , np.float32), #m200b \n ('mhalo_200c' , np.float32), #m200c\n ('mhalo_500c' , np.float32), #m500c\n ('mhalo_2500c' , np.float32), #m2500c\n ('x_off' , np.float32), #Xoff\n ('v_off' , np.float32), #Yoff\n ('spin_Bullock' , np.float32), #spin_bullock\n ('b_to_a' , np.float32), #b_to_a \n ('c_to_a' , np.float32), #c_to_a\n ('x_a' , np.float32), #A[x]\n ('y_a' , np.float32), #A[y]\n ('z_a' , np.float32), #A[z] \n ('b_to_a_500c' , np.float32), #b_to_a(500c)\n ('c_to_a_500c' , np.float32), #c_to_a(500c) \n ('x_a_500c' , np.float32), #A[x](500c) \n ('y_a_500c' , np.float32), #A[y](500c) \n ('z_a_500c' , np.float32), #A[z](500c)\n ('T_U' , np.float32), #T/|U|\n ('Mpseudo_Behroozi', np.float32), #M_pe_Behroozi\n ('Mpseudo_Diemer' , np.float32), #M_pe_Diemer\n ('rhalf_mass' , np.float32) #Halfmass_Radius \n ] \n return dt\n\n def Gadget_binary():\n dt={}\n return dt\n\n choose = {\n 'merger_trees_ASCII': merger_trees_ASCII,\n 'ConsistentTrees_basic_ASCII': ConsistentTrees_basic_ASCII, \n 'ConsistentTrees_ASCII_099': ConsistentTrees_ASCII_099,\n 'ConsistentTrees_ASCII_101': ConsistentTrees_ASCII_101,\n 'stats_basic_bucket': stats_basic_bucket,\n 'stats_perc_bucket': stats_perc_bucket,\n 'stats_custom_bucket': stats_custom_bucket, \n 'ROCKSTAR_ASCII': ROCKSTAR_ASCII,\n 'ROCKSTAR_binary': ROCKSTAR_binary,\n 'ROCKSTAR_ASCII_list': ROCKSTAR_ASCII_list,\n 'Gadget_binary': Gadget_binary\n }\n\n func = choose.get(name)\n return func()", "def read_labels(label_path, label_type, calib_path=None, is_velo_cam=False, proj_velo=None):\n if label_type == \"txt\": #TODO\n places, size, rotates = read_label_from_txt(label_path)\n if places is None:\n return None, None, None\n rotates = np.pi / 2 - rotates\n dummy = np.zeros_like(places)\n dummy = places.copy()\n if calib_path:\n places = np.dot(dummy, proj_velo.transpose())[:, :3]\n else:\n places = dummy\n if is_velo_cam:\n places[:, 0] += 0.27\n\n elif label_type == \"xml\":\n bounding_boxes, size = read_label_from_xml(label_path)\n places = bounding_boxes[30][\"place\"]\n rotates = bounding_boxes[30][\"rotate\"][:, 2]\n size = bounding_boxes[30][\"size\"]\n\n return 
places, rotates, size", "def read_labels(label_path, label_type, calib_path=None, is_velo_cam=False, proj_velo=None):\n if label_type == \"txt\": #TODO\n places, size, rotates = read_label_from_txt(label_path)\n if places is None:\n return None, None, None\n rotates = np.pi / 2 - rotates\n dummy = np.zeros_like(places)\n dummy = places.copy()\n if calib_path:\n places = np.dot(dummy, proj_velo.transpose())[:, :3]\n else:\n places = dummy\n if is_velo_cam:\n places[:, 0] += 0.27\n\n elif label_type == \"xml\":\n bounding_boxes, size = read_label_from_xml(label_path)\n places = bounding_boxes[30][\"place\"]\n rotates = bounding_boxes[30][\"rotate\"][:, 2]\n size = bounding_boxes[30][\"size\"]\n\n return places, rotates, size", "def get_conv_type(filename):\n for conv_type in structured.conv2d_types.keys():\n if conv_type in filename:\n return conv_type\n else:\n log.error(\"Couldn't detect convolution type of\", filename)\n exit(1)", "def out_pixel_info(year0=2018):\n info = np.loadtxt('prepare_files/station_info/out_pixel_%d.txt' % year0, delimiter=',')\n return info", "def __get_format_info_locations(self,size):\n\t\tloc = {\n\t\t\t\t0: [[8,0],[size-1,8]],\n\t\t\t\t1: [[8,1],[size-2,8]],\n\t\t\t\t2: [[8,2],[size-3,8]],\n\t\t\t\t3: [[8,3],[size-4,8]],\n\t\t\t\t4: [[8,4],[size-5,8]],\n\t\t\t\t5: [[8,5],[size-6,8]],\n\t\t\t\t6: [[8,7],[size-7,8]],\n\t\t\t\t7: [[8,8],[8,size-8]],\n\t\t\t\t8: [[7,8],[8,size-7]],\n\t\t\t\t9: [[5,8],[8,size-6]],\n\t\t\t\t10: [[4,8],[8,size-5]],\n\t\t\t\t11: [[3,8],[8,size-4]],\n\t\t\t\t12: [[2,8],[8,size-3]],\n\t\t\t\t13: [[1,8],[8,size-2]],\n\t\t\t\t14: [[0,8],[8,size-1]]\n\t\t}\n\t\treturn loc", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def datasize(self):\n if self.format is AR_FORMAT_SIMPLE:\n return self.size\n elif self.format is AR_FORMAT_BSD:\n return len(self.name)+self.size\n assert False, 'Unknown format %r' % self.format", "def map_dtype(dtype):\n item_idx = int(math.log(dtype.itemsize, 2))\n if dtype.kind == 'i':\n return [int8, int16, int32, int64][item_idx]\n elif dtype.kind == 'u':\n return [uint8, uint16, uint32, uint64][item_idx]\n elif dtype.kind == 'f':\n if dtype.itemsize == 2:\n pass # half floats not supported yet\n elif dtype.itemsize == 4:\n return float32\n elif dtype.itemsize == 8:\n return float64\n elif dtype.itemsize == 16:\n return 
float128\n elif dtype.kind == 'b':\n return int8\n elif dtype.kind == 'c':\n if dtype.itemsize == 8:\n return complex64\n elif dtype.itemsize == 16:\n return complex128\n elif dtype.itemsize == 32:\n return complex256\n elif dtype.kind == 'O':\n return object_", "def read_map_file(path):\r\n with open(path) as f:\r\n dir_name = os.path.dirname(path)\r\n img = cv2.imread(dir_name + '/' + f.readline().strip())\r\n assert img.shape[0] > 0 and img.shape[1] > 0, 'Can not open image file'\r\n meter_per_pixel = float(f.readline().strip())\r\n ori_str = f.readline().strip().split()\r\n origin = np.array([int(ori_str[0]), int(ori_str[1])])\r\n init_heading = float(ori_str[2])\r\n return img, meter_per_pixel, origin, init_heading", "def _get_thickness(self,filename, maxLen=3):\n filename = os.path.splitext(filename)[0] \n filename = os.path.split(filename)[1] \n filename = filename.split(\"_t\")[-1] \n filename = filename.split(\"_\")[0]\n if \"v\" in filename:\n part1=filename.split(\"v\",1)[0]\n part2=filename.split(\"v\",1)[1]\n filename = ''.join((filename.split(\"v\",1)[0],'.',filename.split(\"v\",1)[1]))#['%s.%s' %(filename.split(\"v\",1)[0],filename.split(\"v\",1)[1])]\n print(filename)\n thickness = filename\n return thickness", "def get_gdal_datatype(in_datatype):\n if in_datatype == 'float64':\n return gdalconst.GDT_Float64\n elif in_datatype == 'float32':\n return gdalconst.GDT_Float32\n elif in_datatype == 'int32':\n return gdalconst.GDT_Int32\n else:\n raise ValueError(\n 'Unrecognized data type in get_gdal_datatype():\\n {}'.format(\n in_datatype))", "def get_image_size(path : str):\n from PIL import Image\n im = Image.open(path)\n return im.size # W, H", "def get_img_data(data_type, file_info, img_info, **kwargs):\n if file_info['ext']=='fits':\n hdulist = get_file(file_info)\n data = hdulist[int(img_info['frame'])].data\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img = get_file(file_info)\n data = np.array(img)\n \n if data_type == 'data':\n if 'scale' in kwargs:\n width = int(kwargs['width']/2/img_info['viewer']['scale'])\n height = int(kwargs['height']/2/img_info['viewer']['scale'])\n else:\n width = int(kwargs['width']/2)\n height = int(kwargs['height']/2)\n x0 = max(0, kwargs['x']-width)\n y0 = max(0, kwargs['y']-height)\n xf = min(data.shape[1], kwargs['x']+width)\n yf = min(data.shape[0], kwargs['y']+height)\n if 'scale' in kwargs:\n tile_data = {\n 'x0_idx': x0,\n 'y0_idx': y0,\n 'xf_idx': xf,\n 'yf_idx': yf\n }\n data = scale_data(file_info, img_info, tile_data, data)\n else:\n data = data[y0:yf, x0:xf]\n response = {\n 'id': 'data',\n 'min': float(data.min()),\n 'max': float(data.max()),\n 'mean': float(data.mean()),\n 'median': float(np.median(data)),\n 'std_dev': float(np.std(data)),\n 'data': data.tolist()\n }\n elif data_type == 'datapoint':\n if (kwargs['x']<data.shape[1] and kwargs['y']<data.shape[0] and\n kwargs['x']>=0 and kwargs['y']>=0):\n response = {\n 'id': 'datapoint',\n 'px_value': float(data[kwargs['y'],kwargs['x']])\n }\n else:\n response = {\n 'id': 'datapoint',\n 'px_value': 0\n }\n else:\n raise ToyzJobError(\"Loading that data type has not been implemented yet\")\n return response", "def extract_data(filename, num_images, IMAGE_WIDTH):\n\n# this function definition has been taken from internet\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(16)\n buf = 
bytestream.read(IMAGE_WIDTH * IMAGE_WIDTH * num_images)\n data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) #Interpret a buffer as a 1-dimensional array\n data = data.reshape(num_images, IMAGE_WIDTH*IMAGE_WIDTH)\n return data", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def getDimensions():", "def dtype(self):\n if self.num_polygons < 2 ** 8:\n dtype = numpy.uint8\n elif self.num_polygons < 2 ** 16:\n dtype = numpy.uint16\n else:\n dtype = numpy.uint32\n return dtype", "def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size", "def size(path):", "def get_unpack_format(in_dtype, dsize, endian='big', logger=cubeds.pylogger.get_logger(__name__)):\n # logger = cubeds.pylogger.get_logger(__name__)\n if dsize <= 8:\n # bits\n letter = 'b'\n elif dsize == 16:\n # short\n letter = 'h'\n elif dsize == 32:\n # int\n letter = 'i'\n elif dsize == 64:\n # long long\n letter = 'q'\n elif dsize == -1:\n # padding\n letter = str(dsize//8)+'x' # want self.size bytes of padding\n in_dtype = 'int'\n dsize = 1\n else:\n logger.fatal(\"parsing error for datatype size\")\n raise cubeds.exceptions.UnpackFormatError(msg=\"parsing error for datatype size\")\n\n if in_dtype == 'dn':\n letter = letter.upper()\n elif in_dtype == 'sn':\n letter = letter.lower()\n elif in_dtype == 'char':\n letter = str(int(dsize/8)) + 's'\n elif in_dtype == 'float' or in_dtype == 'double':\n letter = 'd'\n else:\n logger.fatal(\"parsing error for datatype \\\"\" + in_dtype + \"\\\" with size \\\"\" + str(dsize) + \"\\\"\")\n raise cubeds.exceptions.UnpackFormatError(msg=\"parsing error for datatype \\\"\" + in_dtype + \"\\\" with size \\\"\"\n + str(dsize) + \"\\\"\")\n\n if endian == 'big':\n letter = '>' + letter\n elif endian == 'little':\n letter = '<' + letter\n else:\n logger.fatal(\"DID NOT SPECIFY ENDIANNESS CORRECTLY\")\n raise cubeds.exceptions.UnpackFormatError(\"Endianness not specified correctly.\")\n return letter", "def _get_num_chars(a):\n if issubclass(a.dtype.type, str_):\n return a.itemsize // 4\n return a.itemsize", "def get_file_type(filepath):\n\n ext = os.path.splitext(filepath)[1]\n if ext in ['.tif', '.tiff']:\n return \"GeoTIFF\"\n elif ext in ['.nc']:\n return \"NetCDF\"\n else:\n return None", "def cvtFromKMG(str):\n\n # remember, we already verify sizeset[]\n match = re.match('(\\d+)([kmg]?\\Z)', str, re.I)\n size = int(match.group(1))\n type = match.group(2).lower()\n if type == '':\n objsize = size\n if type == 'k':\n objsize = size * 1024\n elif type == 'm':\n objsize = size * 1024 * 1024\n elif type == 'g':\n objsize = size * 1024 * 1024 * 1024\n return(objsize)", "def dtype(a):\n return a.dtype", "def analyze_static_image(path: 
str):\n image = Image.open(path)\n width, height = image.size\n image.close()\n\n media_info = {\n 'width': width,\n 'height': height,\n 'resolution': round(width * height / 1_000_000, 2),\n 'size': os.path.getsize(path),\n 'type': 'image',\n 'signature': '', # TODO\n 'signature_type': '', # TODO\n }\n\n return media_info", "def size_from_name(size, sizes):\n\n by_name = [s for s in sizes if s.name == size]\n if len(by_name) > 1:\n raise Exception('more than one image named %s exists' % size)\n return by_name[0]", "def geoBoundsMetadata(filename,format=\"shapefile\"):\n if format==\"shapefile\":\n with fiona.open(filename, 'r') as c:\n bnd= c.bounds\n bnd=(bnd[0],bnd[2],bnd[3],bnd[1])\n return \"ENVELOPE{0}\".format(bnd)\n\n else:\n with rasterio.open(filename,'r') as c:\n bnd= c.bounds\n bnd=(bnd[0],bnd[2],bnd[3],bnd[1])\n return \"ENVELOPE{0}\".format(bnd)", "def get_vtk_image_shape(image):\n w, h = image.GetDimensions()[:2]\n num_channels = image.GetNumberOfScalarComponents()\n return (h, w, num_channels)", "def dtype_type( dtype, name = None ):\n if name:\n for property in dtype.descr:\n if property[ 0 ] == name:\n return property[ 1 ]\n raise ValueError( \"Property not found\" )\n else:\n if len( dtype.descr ) > 1:\n raise ValueError( \"Multiple types present\" )\n\n return dtype.descr[ 0 ][ 1 ]", "def get_imagetype_from_filename(filename):\n if 'adni' in filename.lower():\n imagetype = 'adni'\n elif 'fmri' in filename.lower():\n imagetype = 'fmri'\n elif 'dti' in filename.lower():\n imagetype = 'dti'\n else:\n print('ERROR: Unknown input file ' + f)\n imagetype = None\n\n return imagetype", "def fileInfo(tif: TiffFile):\n print(tif.flags)\n print(tif.geotiff_metadata)\n for page in tif.pages:\n print(page.tags)\n print(page.geotiff_tags)\n print(page.shape)\n print(page.dtype)\n print(page.flags)", "def _assert_dtype(images):\n dtype = dtypes.as_dtype(images.dtype).base_dtype\n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError('Invalid image dtype {0}, expected uint8 or float32'.format(dtype))\n\n return dtype", "def np_dtype(dali_dtype):\n return numpy.dtype(dali_dtype)", "def dtype():\n return RaggedDtype()" ]
[ "0.6820558", "0.5704018", "0.5546598", "0.5516806", "0.5513057", "0.54892457", "0.53587013", "0.5346659", "0.5331653", "0.529922", "0.5295019", "0.52838844", "0.5277544", "0.5265453", "0.5226433", "0.5198636", "0.51947224", "0.5194617", "0.5169112", "0.51395136", "0.50898606", "0.5039762", "0.50133353", "0.50117266", "0.50100327", "0.49992338", "0.49855244", "0.49773875", "0.4973709", "0.49664664", "0.49664414", "0.495642", "0.49454147", "0.49389434", "0.49295998", "0.4926003", "0.49020582", "0.49005342", "0.48982123", "0.48964784", "0.48793328", "0.48756966", "0.4875346", "0.48685136", "0.48593587", "0.48466417", "0.4845299", "0.4841813", "0.48268923", "0.48152727", "0.4815026", "0.48058233", "0.48022795", "0.47983584", "0.47952232", "0.4792517", "0.47921044", "0.47903177", "0.47808832", "0.47781816", "0.47753334", "0.47741565", "0.47656724", "0.47523692", "0.47465742", "0.47238284", "0.4717977", "0.4717977", "0.47017348", "0.4699275", "0.46988863", "0.46963593", "0.46907026", "0.46825188", "0.4681494", "0.4677752", "0.4673255", "0.46705002", "0.46675265", "0.4662352", "0.4653165", "0.46526778", "0.4650418", "0.46494928", "0.4645982", "0.46446142", "0.4638574", "0.46366224", "0.462874", "0.46281785", "0.46276098", "0.4623237", "0.4617518", "0.46167862", "0.46163416", "0.4612476", "0.46104836", "0.46067804", "0.45967868", "0.45950195" ]
0.8044237
0
Move files out of subdirectories in the current working directory.
def move_file():
    # print("\n".join(os.listdir(filepath)))
    # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]
    # print(filepath + ":\n " + "\n ".join(folders))
    folders = filter(os.path.isdir, os.listdir(u"."))
    # print("Sub-folders: ", u"\n".join(folders))
    for folder in folders:
        files = [os.path.join(folder, fn) for fn in os.listdir(folder)]
        files = filter(os.path.isfile, files)
        for fn in files:
            _, filename = os.path.split(fn)
            shutil.move(fn, filename)
        assert 0 == len(os.listdir(folder))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveFiles(outputDir, files):\n\tfor fn in files:\n\t\tshutil.move(fn, join(outputDir, getFilenameWithoutPath(fn)))", "def move_recursively(src, dst, overwrite=False, changed_only=True):\n if os.path.isdir(src):\n movetree(src, dst, overwrite, changed_only)\n else:\n movefile(src, dst, overwrite, changed_only)", "def walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n for filename in filenames:\n shutil.move(os.path.join(directory_name, filename),\n os.path.join(directory_name) + '/' + get_fixed_filename(filename))", "def move_files(fname_fout, root_dir, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Find files which filename of fname_fout\n matches = []\n pattern = fname + '*'\n root_fnames = os.listdir(root_dir)\n for filename in fnmatch.filter(root_fnames, pattern):\n matches.append([filename, os.path.join(root_dir, filename)])\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = os.path.join(dest_dir, new_folder_name)\n # Move files\n for cur_file in matches:\n os.renames(cur_file[1], os.path.join(dest_dir, cur_file[0]))", "def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def _recursive_put_files(self, is_subdirectory=False, sub_directory_name=None):\n current_path = os.path.basename(os.getcwd())\n LOG.info(f\"Copying files from the directory '{current_path}'\")\n for path_ in os.listdir():\n # Skip dotfiles and __pycache__\n if path_.startswith('.') or path_.startswith('__'):\n continue\n if os.path.isdir(path_):\n if sub_directory_name is not None:\n dir_name = os.path.join(sub_directory_name, path_)\n else:\n dir_name = path_\n try:\n self._file_explorer.md(dir_name)\n except Exception as e:\n print(e)\n os.chdir(dir_name.split(os.path.sep)[-1])\n self._recursive_put_files(\n is_subdirectory=True,\n sub_directory_name=dir_name,\n )\n else:\n try:\n if sub_directory_name is not None:\n self._file_explorer.put(path_, os.path.join(sub_directory_name, path_))\n else:\n self._file_explorer.put(path_)\n except RemoteIOError as e:\n print(path_, e)\n if is_subdirectory:\n os.chdir(UP_ONE_DIRECTORY)", "def main():\n os.chdir(\"FilesToSort\")\n files = os.listdir('.')\n for file in files:\n extension_directory = file[file.find('.') + 1:]\n try:\n os.mkdir(extension_directory)\n except FileExistsError:\n pass\n shutil.move(file, extension_directory)", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def main():\n os.chdir('FilesToSort')\n extension_to_category = {}\n for filename in os.listdir('.'):\n if os.path.isdir(filename):\n continue\n extension = filename.split('.')[-1]\n make_subdirectories(extension, extension_to_category)\n shutil.move(filename, extension_to_category[extension])", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def 
_move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def cleaning_this_directory():\n import os, shutil\n files = os.listdir(\".\")\n for f in files:\n if os.path.isfile(f):\n extension = f.split(\".\")[-1]\n if extension == 'jpg':\n #move the file\n os.rename(f, \"images/\"+f)\n elif extension == 'JPG':\n #move to xml file\n os.rename(f, 'xml/'+f)\n else:\n pass", "def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)", "def _sync_directories(from_directory, to_directory):\n if not os.path.exists(to_directory):\n os.mkdir(to_directory)\n for root, dirs, files in os.walk(from_directory):\n to_root = root.replace(from_directory, to_directory)\n for directory in dirs:\n to_child_dir = os.path.join(to_root, directory)\n if not os.path.exists(to_child_dir):\n os.mkdir(to_child_dir)\n for fname in files:\n from_file = os.path.join(root, fname)\n to_file = os.path.join(to_root, fname)\n with open(from_file, 'rb') as a, open(to_file, 'wb') as b:\n b.write(a.read())", "def move_files(self, download_path):\n if self.file_list is None:\n self._set_file_list()\n\n for individual_file in self.file_list:\n source_path = os.path.join(self.base_dir, individual_file)\n dest_path = os.path.join(download_path, individual_file)\n # We don't move files that don't exist\n if not os.path.exists(source_path):\n continue\n\n # Make sure the destination directory exists\n if not os.path.exists(os.path.dirname(dest_path)):\n os.makedirs(os.path.dirname(dest_path))\n if self.to_copy:\n shutil.copy(source_path, dest_path)\n else:\n os.rename(source_path, dest_path)\n return", "def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)", "def move_files(self, files: List[str], directory=\"\"):\n result = []\n for file in files:\n if directory == \"\":\n temp_file = File(file)\n new_directory = self._create_or_define(temp_file)\n origin_folder = \"\"\n else:\n new_directory = directory\n origin_folder = os.path.basename(os.path.dirname(file))\n temp_file = File(os.path.basename(file))\n\n if not file.startswith(new_directory):\n if temp_file.get_extension():\n temp_extension = \".\" + temp_file.get_extension()\n else:\n temp_extension = \"\"\n\n ordinal_number = self.check_same_objects(new_directory, temp_file)\n target_name = 
temp_file.get_just_name() + temp_extension\n if ordinal_number:\n formatted_ordinal_number = f\" ({ordinal_number - 1})\"\n target_name = (\n temp_file.get_just_name()\n + formatted_ordinal_number\n + temp_extension\n )\n\n if self.underscore_flag:\n target_name = target_name.replace(\" \", \"_\")\n\n new_position = os.path.join(self.directory, new_directory, target_name)\n\n file_position = os.path.join(\n self.directory, origin_folder, str(temp_file)\n )\n if file_position != os.path.join(\n self.directory,\n new_directory,\n temp_file.get_just_name() + temp_extension,\n ):\n result.append(os.path.join(origin_folder, str(temp_file)))\n self.possibilities[new_directory].files.append(temp_file)\n if not self.dry_run:\n os.rename(file_position, new_position)\n else:\n print(f\"{file_position} would be moved to {new_position}\")\n elif self.dry_run:\n print(\n f\"{file_position} won't be move since the location is the same\"\n )\n\n self.log_result(result, directory)", "def move_files(src, dst, filenames):\n for filename in filenames:\n os.rename(os.path.join(src, filename), os.path.join(dst, filename))", "def mirror_directory_tree_with_files_loop(self, in_dirpath, out_dirpath, only_include_filetypes, include_file_suffix, avoid_files_with):\n for i in os.listdir(in_dirpath):\n if i[0] == '.' or i[:6] == 'README':\n continue\n elif os.path.isdir(in_dirpath + i):\n if not os.path.exists(out_dirpath + i):\n os.makedirs(out_dirpath + i)\n self.mirror_directory_tree_with_files_loop(self, in_dirpath + i + '/', out_dirpath + i + '/', only_include_filetypes, include_file_suffix, avoid_files_with)\n elif os.path.isfile(in_dirpath + i):\n if avoid_files_with:\n if avoid_files_with in '.'.join(i.split('.')[:-1]):\n continue\n if only_include_filetypes:\n suffix = i.split('.')[-1]\n if suffix in only_include_filetypes:\n if include_file_suffix:\n filename = i\n else:\n filename = '.'.join(i.split('.')[:-1])\n self.files_containing_filetype.update([in_dirpath + i])\n self.mirrored_filepaths.update([out_dirpath + filename])\n self.mirrored_directory_leaves.update([out_dirpath])\n else:\n if include_file_suffix or not '.' in i:\n filename = i\n else:\n filename = '.'.join(i.split('.')[:-1])\n self.files_containing_filetype.update([in_dirpath + i])\n self.mirrored_filepaths.update([out_dirpath + filename])\n self.mirrored_directory_leaves.update([out_dirpath])\n else:\n print dirpath + i, 'does not exist'\n return", "def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):\n for file_no in range(len(selected_image_list)):\n shutil.move(selected_image_list[file_no], out_dir + selected_image_list[file_no].split('/')[-1])\n return", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def rename_files_dirs(options):\n # create dirs first\n call_command('''find . -type d | while read f; do mkdir -p \"$(echo $f | sed 's/%(patrn)s/%(repl)s/g')\"; done''', options)\n # than move files\n call_command('''find . 
-type f | while read f; do mv \"$f\" \"$(echo $f | sed 's/%(patrn)s/%(repl)s/g')\"; done''', options)\n # delete empty dirs\n call_command('''find -depth -type d -empty -exec rmdir {} \\;''', [(1,1)])", "def organize_by_order(current_path):\n\tfor file in sorted(os.listdir(current_path)):\n\t\tif file != 'file_organizer.py':\n\t\t\ttry:\n\t\t\t\tos.makedirs(file[0])\n\t\t\t\tclick.echo(\"Creating a Folder\",file[0])\n\t\t\texcept:\n\t\t\t\tNone\n\t\t\tshutil.move(file,file[0])\n\t\t\tclick.secho(('Finished moving : {} to {} folder'.format(file,file[0])),fg='green')", "def moveFiles(rootDir):\n\n homedir = os.environ['HOME']\n albumDirec = 'AlbumCoverImages'\n #Check if a directory exists\n if not os.path.isdir(os.path.join(homedir, 'Pictures', albumDirec)):\n print('AlbumCoverImages not found, trying to make...')\n os.makedirs(os.path.join(homedir, 'Pictures', albumDirec))\n \n for root, dirs, files in os.walk(rootDir, topdown=False):\n #print('testtest')\n for name in files:\n \n\n #Find image files, and move them to albumCoverImages\n #For some bullshit reason or statments won't work here, have to\n # parse this out to elif statements, ughhhh...\n \n if '.jpg' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec)))\n \n elif '.png' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.gif' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.pdf' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n\n else:\n try:\n #Use tinytag to get file metadata\n tag = TinyTag.get(os.path.join(root, name))\n artistName = tag.artist\n albumName = tag.album\n \n #TODO: Need to add more conditions\n if isinstance(artistName, str):\n artistName = artistName.replace('/', '_')\n\n elif isinstance(albumName, str):\n albumName.replace('/', '_')\n \n\n #Check if the artists directory exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName)):\n os.makedirs(os.path.join(rootDir, artistName))\n print('{0} directory made!'.format(artistName))\n \n except ValueError:\n print('ValueError with {0}'.format(root+'/'+name))\n continue\n\n except TypeError:\n print('TypeError with {0}'.format(root+'/'+name))\n continue\n\n #Check if the songs album exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName, albumName)):\n os.makedirs(os.path.join(rootDir, artistName, albumName))\n print('{0} directory made!'.format(albumName))\n \n except TypeError:\n print('TypeError with {0}! Look at album directory making.'.format(root+'/'+name))\n continue\n\n #TODO: Check if album is in artist direc, if not, move it\n\n #Check if song is in album, if not move it \n try:\n if os.path.isfile(os.path.join(rootDir, artistName, albumName, name)) == False:\n os.rename(os.path.join(root, name), os.path.join(rootDir, artistName, albumName, name))\n print('{0} moved to {1}!'.format(name, albumName))\n \n except TypeError:\n print('TypeError with file {0}! 
Look at line song moving'.format(root+'/'+name))\n continue\n \n #TODO: Check if this part works\n except LookupError:\n if (\".jpg\") or (\".png\") or (\".7z\") or (\"README\") or (\".zip\") in name:\n continue\n \n else:\n print('No reader support for {0}'.format(name))\n continue", "def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)", "def sort_folder():\n for file in downloads_path.iterdir():\n if file.is_file():\n extension = file.suffix\n file = str(file)\n if extension in program_types:\n move_file(file, programs_path)\n elif extension in compressed_types:\n move_file(file, compressed_path)\n elif extension in doc_types:\n move_file(file, documents_path)\n elif extension in music_types:\n move_file(file, music_path)\n elif extension in video_types:\n move_file(file, video_path)\n elif extension in picture_types:\n move_file(file, pictures_path)\n else:\n move_file(file, other_path)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def movefiles_subjectdirs(sub_dirs, ToProcess):\n \n \n # Create subdirectories\n for subjectDir in sub_dirs:\n os.chdir(subjectDir)\n \n mri_files = glob.glob('*.nii.gz')\n mri_dir_names = []\n \n for mriFile in mri_files:\n split_file = mriFile.split('_')\n from_idx = split_file.index('WIP')\n to_idx = split_file.index('SENSE')\n toAppend = \"_\".join(split_file[from_idx+1:to_idx]) \n mri_dir_names.append(toAppend)\n \n os.mkdir(toAppend)\n shutil.move(mriFile, toAppend)\n \n print \"Created the following subdirs for {0}: \".format(os.path.basename(subjectDir))\n for d in mri_dir_names:\n print d\n print \"\\n\"", "def mirror_directory_tree_with_files(self, out_dirpath, only_include_filetypes=None, include_file_suffix=True, avoid_files_with=None):\n if out_dirpath[-1] != '/':\n out_dirpath += '/'\n if not os.path.exists(out_dirpath):\n os.makedirs(out_dirpath)\n self.mirror_directory_tree_with_files_loop(self, self.dirpath, out_dirpath, only_include_filetypes, include_file_suffix, avoid_files_with)\n return", "def recursively_rename_files():\n ordered_equipts = get_directory_definition()\n\n # Iterates each equipement folder\n for ii in ordered_equipts:\n iterate_dir(ii, ordered_equipts.index(ii))", "def move_file_to_directory(base_path, file_name, directory_name):\n path = FileUtils.full_path\n\n full_file_path = path(base_path, file_name)\n full_dir_path = path(base_path, directory_name)\n 
full_new_path = path(full_dir_path, file_name)\n try:\n os.rename(full_file_path, full_new_path)\n except FileNotFoundError:\n pass\n # pass for now", "def remove_files(files):\n for file in files:\n if os.path.exists(file):\n if file.startswith(\"./\") or file.startswith(\".\\\\\"):\n file = file[2:]\n if os.path.isdir(file):\n rmtree(file)\n else:\n os.unlink(file)", "def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)", "def _process_relative_to(unpack_root, relative_to):\n if relative_to is None:\n return\n relative_root = unpack_root / relative_to\n if not relative_root.is_dir():\n get_logger().error('Could not find relative_to directory in extracted files: %s',\n relative_to)\n raise ExtractionError()\n for src_path in relative_root.iterdir():\n dest_path = unpack_root / src_path.name\n src_path.rename(dest_path)\n relative_root.rmdir()", "def move_from_temp_directory(self):", "def copy_files_and_create_dirs(files) -> None:\r\n for file in files:\r\n target_dir_name = os.path.dirname(file[1])\r\n\r\n # will create all intermediate-level directories\r\n if not os.path.exists(target_dir_name):\r\n os.makedirs(target_dir_name)\r\n\r\n shutil.copyfile(file[0], file[1])", "def move_files(sim_dir, dest_dir, file_patterns):\n for f in file_patterns:\n for p in glob.glob1(sim_dir, f):\n try:\n shutil.move(os.path.join(sim_dir, p), os.path.join(dest_dir, p))\n except Exception as e:\n print(\n \"error while copy ing file from {} to {}\\n{}\".format(\n sim_dir, dest_dir, e\n )\n )", "def convert_and_move_dir (dirname, origpath, wavpath, mp4path, mono):\n print(dirname)\n origdirpath = path.join(origpath, dirname)\n wavdirpath = path.join(wavpath, dirname)\n for filename in listdir(origdirpath):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav(filename, name, origdirpath, wavdirpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n shutil.move(origdirpath, mp4path)", "def oh_folders(src, dest=dest):\n copytree(src, dest, ignore=ignore_patterns(*ignore_list), dirs_exist_ok=True)", "def sort_files_in_a_folder(mypath):\n files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n file_type_variation_list = []\n filetype_folder_dict = {}\n for file in files:\n filetype = file.split(\".\")[1]\n if filetype not in file_type_variation_list:\n file_type_variation_list.append(filetype)\n computer = mypath + \"/\" + filetype + \"_folder\"\n filetype_folder_dict[str(filetype)] = str(computer)\n if os.path.isdir(computer) == True: # folder exists\n continue\n else:\n os.mkdir(computer)\n for file in files:\n src_path = mypath + \"/\" + file\n filetype = file.split(\".\")[1]\n if filetype in filetype_folder_dict.keys():\n dest_path = filetype_folder_dict[str(filetype)]\n shutil.move(src_path, dest_path)\n print(src_path + \">>>\" + dest_path)", "def movetree(src, dst, 
overwrite=False, changed_only=True):\n assert os.path.isdir(src), \\\n (\"Source path `%s` does not name an existing directory\" % src)\n errors = []\n if not os.path.exists(dst):\n os.makedirs(dst)\n for name in os.listdir(src):\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n try:\n if os.path.isdir(srcname):\n errors.extend(\n movetree(srcname, dstname, overwrite, changed_only))\n else:\n movefile(srcname, dstname)\n except (IOError, os.error) as why:\n errors.append((srcname, dstname, why))\n return errors", "def copyDir(src, dst, includes, excludes = []):\n\tmultiFilesReplacements([], dst, src, includes, excludes)", "def removeDir(self,srcDir,destDir,exts): \n for srcFile in os.listdir(srcDir):\n srcExt = os.path.splitext(srcFile)[-1].lower()\n srcPath = os.path.join(srcDir,srcFile)\n destPath = os.path.join(destDir,srcFile)\n if os.path.exists(destPath):\n if srcExt in exts:\n os.remove(destPath)\n elif os.path.isdir(srcPath):\n self.removeDir(srcPath,destPath,exts)", "def copyUFOs(srcDirPath, dstDirPath):\n if not srcDirPath.endswith('/'):\n srcDirPath += '/'\n if not dstDirPath.endswith('/'):\n dstDirPath += '/'\n assert os.path.isdir(srcDirPath)\n copiedFiles = []\n for fileName in os.listdir(srcDirPath):\n if fileName.endswith('.ufo'):\n shutil.copytree(srcDirPath + fileName, dstDirPath + fileName)\n copiedFiles.append(fileName)\n return copiedFiles", "def _clean_files(self, in_subdirectory=False):\n files = self._file_explorer.ls()\n if not in_subdirectory:\n LOG.info(f\"Cleaning {len(files)} file(s) on the device\")\n for file_ in files:\n try:\n self._file_explorer.rm(file_)\n except Exception as e:\n # Try to explore subdirectory\n LOG.info(f\"Attempting to clean directory {file_}\")\n self._file_explorer.cd(file_)\n self._clean_files(in_subdirectory=True)\n if in_subdirectory:\n self._file_explorer.cd('..')\n else:\n LOG.info(\"Done cleaning FS\")", "def package_files(location = None):\n for entry in os.listdir(location):\n real_location = os.path.join(location, entry)\n if os.path.isdir(real_location) and entry != \"modulefiles\":\n shutil.rmtree(real_location)\n elif entry != 'modulefiles':\n os.unlink(real_location)\n for name in glob.glob(os.path.join(location, 'modulefiles') + \"/*\"):\n shutil.move(name, location)\n for root, dirs, files in os.walk(location):\n for name in dirs:\n dir_location = os.path.join(root, name)\n if name == '.git' or name == '.gitignore':\n shutil.rmtree(dir_location)\n else:\n os.chmod(dir_location, 0o755)\n for name in files:\n file_location = os.path.join(root, name)\n os.chmod(file_location, 0o644) \n os.rmdir(os.path.join(location, \"modulefiles\"))\n\n return location", "def remove_current_logs_and_mv_comp_files(to_move_files, files_to_be_moved):\n [os.remove(f\"{file_name}\") for file_name in to_move_files]\n [shutil.move(os.path.join(LOGS_PATH, file_name), DESTINATION) for file_name in files_to_be_moved]", "def move_files(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n data_files = project_obj.files.all()\n\n for data in data_files:\n working_dir = get_sequencedir(project_obj)\n create_dir(working_dir)\n path = data.file.name.split('/')[-1]\n end_path = os.path.join(working_dir, path)\n\n if file_exists(end_path):\n print(\"File: \", end_path, \" already found. 
No need to copy.\")\n else:\n try:\n print(\"Copying from %s to %s\" % (data.file.name, end_path))\n shutil.copyfile(data.file.name, end_path)\n # if somehow the user deleted the database files, they are told to restart the database\n except FileNotFoundError:\n print(\"Protected database files have been deleted by the user. Restart the database to continue.\")", "def _clean(dest, level, recursive, rmFileExtensions, rm):\n \n if level > recursive:\n return\n\n for f in os.listdir(dest):\n if os.path.isfile(f):\n ext = os.path.splitext(f)[-1]\n if ext in rmFileExtensions:\n print \"Deleting \", f\n rm(f)\n else:\n _clean(os.path.join(dest, f), level + 1, recursive, rmFileExtensions, rm)", "def rmdirs(path):\n\n for f in file_walker(path,follow_links=False,report_dirs='last'):\n if f[-1]==os.sep:\n if f!=os.sep:\n #print \"os.rmdir(%r)\"%(f[:-1],)\n os.rmdir(f[:-1])\n else:\n #print \"os.remove(%r)\"%(f,)\n os.remove(f)", "async def _copy_folder_files(self, src_dir, dest_dir):\n for dir_item in os.listdir(src_dir):\n src_path = os.path.join(src_dir, dir_item)\n if os.path.isfile(src_path):\n await self._copy_file_with_hook(dir_item, src_path, os.path.join(dest_dir, dir_item))", "def expand(self, path=None):\n if path is not None:\n os.chdir(path)\n\n cwd = os.getcwd()\n logger.debug('Expanding: %s', cwd)\n for src in os.listdir(cwd):\n if os.path.isdir(src):\n logger.debug('Skipping %s, directory', src)\n continue\n parts = src.split(self.separator)\n path, filename = '/'.join(parts[:-1]), parts[-1]\n if path:\n if not os.path.exists(path):\n logger.debug('Making new directory %s', path)\n os.makedirs(path)\n elif os.path.isfile(path):\n raise Exception(\n 'Directory %s and file of same name found.', path)\n\n dest = os.path.join(path, filename)\n logger.info('Renaming %s -> %s' % (src, dest))\n os.rename(src, dest)", "def move_tracks_to_music_folder(self):\n home = os.path.expanduser(\"~\")\n dest = home + \"/Music/\"\n for each_file, artist in self.past_songs_db_data:\n sub_folder = artist + \"/\" if artist != \"\" else \"\" \n # possible race condition\n if not os.path.exists(dest + sub_folder):\n os.makedirs(dest + sub_folder)\n\n if os.path.isfile(each_file) and \\\n not os.path.isfile(dest + each_file): \n shutil.move(each_file, dest + sub_folder)", "def remove_file_from_subfolders(filename, dst, *args, **kwargs):\n for folder in (d for d in os.listdir(dst) if os.path.isdir(d)):\n tgt = os.path.abspath(dst) + '\\\\' + folder + '\\\\' + filename\n if os.path.isfile(tgt):\n print '\\nDeleting {} from {}...'.format(filename, folder)\n try:\n os.remove(tgt)\n except Exception as e:\n print 'Exception removing {} from {}: {}'.format(filename, folder, e)\n else:\n print '{} removed successfully from {}.'.format(filename, folder)\n else:\n print '\\n{} not found in {}'.format(filename, dst)", "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "def copy_subtree(src, dst):\n for src_f in os.listdir(src):\n src_path = os.path.join(src, src_f)\n if os.path.isdir(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copytree(src_path, dst_path)\n else:\n ProcessJson.copy_subtree(src_path, dst_path)\n elif os.path.isfile(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copy(src_path, dst_path)", "def applyDir(self,srcDir,destDir,exts): \n for srcFile in os.listdir(srcDir):\n srcExt = os.path.splitext(srcFile)[-1].lower()\n srcPath = 
os.path.join(srcDir,srcFile)\n destPath = os.path.join(destDir,srcFile)\n if srcExt in exts:\n if not os.path.exists(destDir):\n os.makedirs(destDir)\n shutil.copyfile(srcPath,destPath)\n if self.progress: \n self.cumSize += os.path.getsize(srcPath)\n self.progress(self.cumSize,_('Copying Files...'))\n elif os.path.isdir(srcPath):\n self.applyDir(srcPath,destPath,exts)", "def flatten(self, path=None):\n if path is not None:\n os.chdir(path)\n\n cwd = os.getcwd()\n logger.debug('Flattening files in %s' % cwd)\n\n for dirpath, dirnames, filenames in os.walk('.'):\n if dirpath.startswith('./.git'):\n continue\n\n for base_filename in filenames:\n file_path = os.path.join(dirpath, base_filename)\n filename = os.path.relpath(file_path, '.')\n flattened = filename.replace('/', self.separator)\n if filename != flattened:\n logger.info('Renaming %s -> %s' % (filename, flattened))\n os.rename(filename, flattened)", "def move_output_files(output_dir, tree_dir, alignment, save_chain_files, root_dir=\".\"):\n # Create output directory, and subdirectories: analyses, good_trees, bad_trees\n analyses_dir = os.path.join(output_dir, 'analyses', alignment)\n\n if not os.path.exists(analyses_dir):\n os.makedirs(analyses_dir)\n\n if not os.path.exists(tree_dir):\n os.makedirs(tree_dir)\n\n # Move chain files into output/analyses/[alignment]: .chain (maybe), .monitor, .param, .run, .trace, .treelist\n # note that .chain files should only be kept if the save_run flag is True\n keep_file_types = ALL_CHAIN_FILE_TYPES if save_chain_files else KEEP_CHAIN_FILE_TYPES\n candidate_files = os.listdir('.')\n for file_type in keep_file_types:\n for file in candidate_files:\n if file.endswith(file_type):\n current_path = os.path.join('.', file)\n new_path = os.path.join(analyses_dir, file)\n os.rename(current_path, new_path)\n\n # delete all remaining run files\n for file_type in ALL_CHAIN_FILE_TYPES:\n for file in os.listdir('.'):\n if file.endswith(file_type):\n os.remove(file)\n\n # Move and rename output tree file if it has been created\n try:\n os.rename(TREE_FILE_NAME, os.path.join(tree_dir, new_tree_file_name(alignment)))\n except FileNotFoundError:\n warnings.warn(\"The chains have not been running long enough for a tree file to have been generated\",\n UserWarning)", "def move_items(items, target_folder):\n for item in items:\n rename_file(item, os.path.join(target_folder, os.path.split(item)[-1]))", "def copy_files(src_path, dst_path):\r\n for folder in os.listdir(src_path):\r\n for file in os.listdir(os.path.join(src_path, folder)):\r\n source = os.path.join(os.path.join(src_path, folder), file)\r\n dest = os.path.join(dst_path, file)\r\n shutil.copy(source, dest)", "def organize(self):\n for position in os.listdir():\n if os.path.isdir(position):\n temp_folder = Folder(position)\n self.folders.append(temp_folder)\n self._add_all_files(temp_folder)\n\n self._validate_extensions()", "def process_dir(self, src_dir, dst_dir):\n self.logger.tree(src_dir)\n for srcpath in self.list_all_files(src_dir):\n dstpath = srcpath.replace(src_dir, dst_dir)\n # TODO: Can we clean up the way we handle relative_path?\n # Relative path is here so that when we print files in the log it\n # shows only the file's path. Should we just pass it to the logger\n # when we create it? 
Or let the logger figure it out?\n # relative_path = srcpath.replace(src_dir + '/', '')\n self.cur_file = File(srcpath, dstpath, self.logger)\n self.process_file(self.cur_file)", "def organize_by_ext(current_path,extension):\n\tfor file in os.listdir(current_path):\n\t\text = extension\n\t\tif fnmatch.fnmatch(file,'*' + ext):\n\t\t click.secho(('Found File:{}'.format(file)),fg='blue')\n\t\t # If the file is truly a file...\n\t\t if os.path.isfile(file):\n\t\t try:\n\t\t # Make a directory with the extension name...\n\t\t new_dir = ext.strip(\".\")\n\t\t os.makedirs(new_dir)\n\t\t except:\n\t\t None\n\t\t # Copy that file to the directory with that extension name\n\t\t shutil.move(file,new_dir)\n\tclick.secho(('Finished Moving {} to:{} folder'.format(file,new_dir)),fg='green')", "def moveUp():\r\n\tos.chdir(\"..\")", "def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))", "def move_files(probs):\r\n path = '../brain_tiny_dataset_class/png/'\r\n for _, _, files in os.walk(path):\r\n for file in files:\r\n # Reads the ID\r\n id = file[3:-4]\r\n try:\r\n # Reads dictionary of probabilities\r\n result = probs[id]\r\n # Moves pictures in 2 folders\r\n if result['epidural'] > 0 or result['intraparenchymal'] > 0 \\\r\n or result['intraventricular'] > 0 or result['subarachnoid'] > 0 \\\r\n or result['subdural'] > 0:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/hemorrhage/' + file)\r\n else:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/healthy/' + file)\r\n except KeyError:\r\n continue", "def move(queue: Queue,\n from_path: list,\n to_path: str\n ) -> None:\n if len(from_path) > 1: # In case files were got with mask\n for single_path in from_path:\n file = os.path.basename(os.path.normpath(single_path))\n files_location = os.path.commonpath(from_path)\n queue.put(file)\n Thread(target=move_file, args=(queue, files_location, to_path)).start()\n print('Files have been moved.')\n else: # In other cases there will be just one item in array\n source_location = from_path[0]\n if os.path.isdir(from_path[0]):\n files = os.listdir(source_location)\n folder_name = os.path.basename(os.path.normpath(source_location))\n path_to_folder = os.path.join(to_path, folder_name)\n\n if not os.path.exists(path_to_folder):\n os.mkdir(path_to_folder)\n\n threads = []\n for file in files:\n # Each file we put to a queue that has limited number of items.\n # And than it creates a separate thread for each file.\n queue.put(file)\n move_thread = Thread(target=move_file, args=(queue, source_location, path_to_folder))\n threads.append(move_thread)\n move_thread.start()\n # Make sure that all our thread are finished before removing original folder\n for thread in threads:\n thread.join()\n\n os.rmdir(source_location)\n print('Folder has been moved.')\n elif os.path.isfile(from_path[0]): # If it's a file we just copy it without any threads\n file_location = from_path[0]\n file_name = os.path.basename(os.path.normpath(file_location))\n if not os.path.exists(file_name):\n shutil.move(file_location, to_path)\n print(f'File {file_name} has been moved.')\n else:\n print(f'File {file_name} already exists')\n elif not os.path.exists(from_path[0]):\n raise NameError('No such files or folders.')", "def bulk_rename_files(input_path, output_path, suffix, new_suffix):\n for dir_path, dir_names, filenames in os.walk(input_path):\n structure = os.path.join(output_path, dir_path[len(input_path) + 1:])\n for file in 
filenames:\n src = os.path.join(dir_path, file)\n f_name, ext = os.path.splitext(file)\n if not f_name.endswith(suffix):\n file = f_name + new_suffix + ext\n dest = os.path.join(structure, file)\n os.rename(src, dest)", "def _listFiles(files, path):\n\n for item in os.listdir(path):\n item = os.path.join(path, item)\n if os.path.isdir(item):\n _listFiles(files, item)\n else:\n files.append(item)", "def move_file_to_dir(f, dest_dir):\n ls = list_files(dest_dir)\n if f not in ls:\n shutil.move(f, dest_dir)", "def _remove_files_dirs(self):\n if self.remove_remote_files_dirs:\n self._remove_remote_files_dirs()", "def move_files(file: str, destination: str):\n\n try:\n result = _process_files(\"mv\", \"-v\", file, destination)\n except FileNotFoundError:\n print(\"ERROR: '{}' does not exist.\".format(file))\n except FolderNotFoundError:\n print(\n \"ERROR: '{}' destination does not exist.\".format(destination)\n )\n except InsufficientRightsError:\n print(\"ERROR: Insufficient rights to destination '{}'.\".format(\n destination)\n )\n else:\n print(result)", "def MusicScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in mustypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(musicPath, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For Music'", "def main():\n args = parseArguments()\n setLogger()\n files = ls(args.dirs)\n matches = pad(files)\n if args.dry_run:\n dryRun(matches)\n else:\n move(matches)", "def transfer_files(src: str, dst: str, move_src_data: bool = False):\n if move_src_data:\n logger.info('Move {0} to {1}'.format(src, dst))\n shutil.move(src, dst)\n else:\n logger.info('Copy {0} to {1}'.format(src, dst))\n copy_tree(src, dst)", "def move_files_with_extension(self, extension: str):\n\n while True:\n files_with_extension = self.collect_files_with_extensions(extension)\n print(files_with_extension)\n folders_containing = set(\n [\n os.path.basename(os.path.dirname(file))\n for file in files_with_extension\n ]\n )\n directory = input(\n f\"Files with '{extension}' extension are scattered in your folders:\\n\"\n f\" {', '.join(folders_containing)}\\n\"\n f\"Where do you want to put them?\\n\"\n f\"({', '.join(self.possibilities.keys())})\\n\"\n )\n if directory in self.possibilities:\n self.move_files(files_with_extension, directory)\n break\n else:\n print(\"Invalid Input\")", "def copy_file_to_multiple_subfolders(src, dst, *args, **kwargs):\n print '\\nSource: {}\\nDestinations parent folder: {}'.format(src, dst)\n filename = os.path.basename(src)\n for folder in (d for d in os.listdir(dst) if os.path.isdir(d)):\n print '\\nCopying {} to {}...'.format(filename, folder)\n try:\n shutil.copy(src, os.path.abspath(dst) + '\\\\' + folder)\n except Exception as e:\n print e", "def tearDown(self):\n # unittest.TestCase.tearDown(self)\n\n root = os.path.join(\".\", \"files\")\n endingList = os.listdir(root)\n rmList = [fn for fn in endingList if fn not in self.startingList]\n\n if self.oldRoot == root:\n for fn in rmList:\n fnFullPath = os.path.join(root, fn)\n if os.path.isdir(fnFullPath):\n os.rmdir(fnFullPath)\n else:\n os.remove(fnFullPath)\n\n os.chdir(self.oldRoot)", "def Move(args):\n\n parser = argparse.ArgumentParser(usage='mv [Options] sources... 
dest',\n description=Move.__doc__)\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '-f', '--force', dest='force', action='store_true',\n default=False,\n help='force, do not error it files already exist.')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('dest')\n\n options = parser.parse_args(args)\n\n if options.verbose:\n print('mv %s %s' % (' '.join(options.srcs), options.dest))\n\n for src in options.srcs:\n MovePath(options, src, options.dest)\n return 0", "def recursive_overwrite(source, target, ignore=None):\n\n if os.path.isdir(source):\n if not os.path.isdir(target):\n os.makedirs(target)\n files = os.listdir(source)\n\n if ignore:\n ignored = ignore(source, files)\n else:\n ignored = set()\n\n for f in files:\n if f not in ignored:\n recursive_overwrite(\n os.path.join(source, f), os.path.join(target, f), ignore)\n else:\n shutil.copyfile(source, target)", "def copy_files(mod_name, dir_name, output_dir):\n files = pkg_resources.resource_filename(mod_name, dir_name)\n for filename in os.listdir(files):\n full_filename = os.path.join(files, filename)\n if os.path.isdir(full_filename):\n try:\n shutil.copytree(full_filename, os.path.join(output_dir, filename))\n except FileExistsError:\n continue\n else:\n shutil.copy(full_filename, output_dir)", "def _addOrSkipFiles((files, filesToSkip, dirsToSkip), dirName, filesInDir):\n # Handle skipping.\n toDel = []\n for i in range(len(filesInDir)):\n file = os.path.join(dirName, filesInDir[i])\n if os.path.isdir(file):\n for pattern in dirsToSkip:\n if re.match(pattern, os.path.basename(file)):\n toDel.append(i)\n break\n elif os.path.isfile(file):\n for pattern in filesToSkip:\n if re.match(pattern, os.path.basename(file)):\n toDel.append(i)\n break\n toDel.reverse() # delete list elems backwards for proper indexing\n for i in toDel:\n del filesInDir[i]\n \n # Add remaining files (not dirs).\n for file in filesInDir:\n file = os.path.join(dirName, file)\n if os.path.isdir(file):\n continue\n files.append(file)", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, 
relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)", "def merge_folders():\r\n from shutil import copyfile\r\n # Merge all folders into main folder\r\n grp_img_dir = os.listdir('Group_Test_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Test_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n new_directory = 'Group_Test_Images'+'/'+img_label\r\n \r\n try:\r\n os.makedirs(new_directory)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n \r\n file_names = os.listdir('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label)\r\n \r\n for file in file_names:\r\n copyfile('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label+'/'+file, new_directory+'/'+file)", "def copyDir(self, src, subpath):\n dst = self.output_path + \"/\" + subpath\n shutil.copytree(src, dst)", "def MovieScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in movtypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(moviePath, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For Movies'", "def mv(*args):\n filenames = _glob(args)\n nfilenames = len(filenames)\n if nfilenames < 2:\n print 'Need at least two arguments'\n elif nfilenames == 2:\n try:\n os.rename(filenames[0], filenames[1])\n except os.error, detail:\n print \"%s: %s\" % (detail[1], filenames[1])\n else:\n for filename in filenames[:-1]:\n try:\n dest = filenames[-1] + '/' + filename\n if not os.path.isdir(filenames[-1]):\n print 'Last argument needs to be a directory'\n return\n os.rename(filename, dest)\n except os.error, detail:\n print \"%s: %s\" % (detail[1], filename)", "def demo_walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n\n # Loop through each file in the (current) directory\n for filename in filenames:\n new_name = get_fixed_filename(filename)\n source = os.path.join(directory_name, filename)\n destination = os.path.join(directory_name, new_name)\n print(\"Renaming {} to {}\".format(source, destination))\n os.rename(source, destination)", "def move_files(origin=''):\n\tpng_file_list = glob.glob(origin+'*png')\n\tif png_file_list != []:\n\t\tif not os.path.exists(origin+'positions-histograms'):\n\t\t\tos.makedirs(origin+'positions-histograms')\n\t\tfor png in png_file_list:\n\t\t\tshutil.move(str(png), origin+'positions-histograms')", "def move_media(items, dest):\n for file in items:\n filename = os.path.basename(file)\n os.rename(file, dest + '\\\\' + filename)", "def _zipdir(self, dir: Path, zip_handle: zipfile.ZipFile) -> None:\n for root, _, files in os.walk(dir):\n for file in files:\n zip_handle.write(os.path.join(root, file), file)", "def copy_tree_to_path(src_dir, dest_dir):\n names = os.listdir(src_dir)\n\n for name in names:\n srcname = os.path.join(src_dir, name)\n destname = 
os.path.join(dest_dir, name)\n\n if os.path.isdir(srcname):\n shutil.copytree(srcname, destname)\n else:\n shutil.copy(srcname, destname)", "def move_files_checked(fname_fout, extensions, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Check if all requested files are present\n for ext in extensions:\n cur_file = fname + ext\n if not os.path.isfile(cur_file):\n return False\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = os.path.join(dest_dir, new_folder_name)\n\n # Move files\n for ext in extensions:\n cur_file = fname + ext\n os.renames(cur_file, os.path.join(dest_dir, cur_file))\n return True", "def dirs(base, wildcard='[!.]*', recursive=1, prune=('.git', '.svn', 'CVS')):\n prune = tuple(prune or ())\n for dirpath, dirnames, _ in walk(native(base)):\n for item in prune:\n if item in dirnames:\n dirnames.remove(item)\n\n dirnames.sort()\n for name in _fnmatch.filter(dirnames, wildcard):\n dest = _os.path.join(dirpath, name)\n if dest.startswith(root):\n dest = dest.replace(root, '', 1)\n aslist = []\n head, tail = _os.path.split(dest)\n while tail:\n aslist.append(tail)\n head, tail = _os.path.split(head)\n aslist.reverse()\n dest = '/'.join(aslist)\n yield dest\n\n if not recursive:\n break", "def __get_files(self):\n if len(self.files) == 0:\n self.files = os.listdir(self.__path())\n self.files.sort()\n if self.parent:\n self.files.insert(0, \"..\")\n for index, name in enumerate(self.files, start=1):\n if self.__is_dir(self.__make_path(name)):\n self.files[index] = name + \"/\"", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")" ]
[ "0.68817514", "0.6756969", "0.66850746", "0.6610166", "0.6560671", "0.65322196", "0.65059394", "0.643787", "0.6378325", "0.63559556", "0.6313153", "0.62905586", "0.62528837", "0.6242785", "0.62256724", "0.6196293", "0.61216223", "0.611683", "0.61057925", "0.61052036", "0.6077716", "0.6065733", "0.6044018", "0.6006781", "0.60042477", "0.5984417", "0.5982206", "0.5965177", "0.5953763", "0.5953763", "0.59461683", "0.5921779", "0.58842415", "0.58631605", "0.5855661", "0.5851702", "0.5839384", "0.57972544", "0.5777", "0.5769277", "0.57685804", "0.5746299", "0.57433015", "0.5730579", "0.57277447", "0.57251316", "0.5723609", "0.5717213", "0.57121927", "0.5689727", "0.56836146", "0.5683404", "0.56828344", "0.5682653", "0.5677712", "0.56486285", "0.5633354", "0.5632144", "0.5625081", "0.562451", "0.5614217", "0.5609803", "0.5599709", "0.55977136", "0.5582445", "0.55578417", "0.55556244", "0.5554044", "0.5551264", "0.55427474", "0.55400175", "0.55371255", "0.55359584", "0.5529556", "0.55264205", "0.5507592", "0.5505549", "0.55028224", "0.55023324", "0.5498274", "0.5485183", "0.5473426", "0.54660195", "0.5463568", "0.546082", "0.54522514", "0.5443006", "0.5437641", "0.5437354", "0.5436981", "0.5436322", "0.54360807", "0.54349697", "0.5409305", "0.54068524", "0.540634", "0.5401999", "0.53824335", "0.5377982", "0.53753674" ]
0.7259415
0
Find duplications in submitted homework.
def find_duplication(homework):
    re_id = re.compile(r'(?P<stuid>[0-9]{10,11})')
    dup_check = dict()
    with open(homework, 'r') as data:
        lines = data.readlines()
        for ln in lines:
            dt = ln.split()
            csum, right = dt[0], dt[1]
            if csum not in dup_check:
                dup_check[csum] = list()
            m = re_id.search(right)
            if m is not None:
                stu_id = m.group('stuid')
                dup_check[csum].append(stu_id)
    dup_check = filter(lambda k, v: len(v) > 1, dup_check.items())
    dup_check = [(key, sorted(val)) for key, val in dup_check]
    return dup_check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]", "def find_duplicates():\n return AppServer.service.find_duplicated_files()", "def handle_duplicates(self, database):\n number_of_duplicates = 0\n number_of_merged = 0\n if not database.session:\n logger.error(\"no database session\")\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout already has been checked\n if self.is_duplicate_with or self.manual_check_required_with:\n logger.debug(\"dup check - no check, since this workout is marked: {}\".format(self))\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout does not have start_time set, since the following checks are based on it\n if not self.start_time or not self.duration_sec:\n return (number_of_duplicates, number_of_merged)\n\n # potential duplicate if time is overlapping\n # this workout |-----------------|\n # 1st potential duplicate in db |-----------------|\n # 2nd potential duplicate in db |------------------------|\n # 3rd potential duplicate in db |----------------|\n # 4th potential duplicate in db |---------|\n # (Remark to line 2 of 1st filter: needed to use database functions, \n # because modifiers like timedelta do not work with sqlalchemy sql attributes)\n # TODO handle timezones (needed for sqlite strftime)\n duplicates = database.session.query(Workout)\\\n .filter(or_(and_(Workout.start_time < self.start_time,\n func.strftime('%s', Workout.start_time, 'utc') + Workout.duration_sec >= self.start_time.timestamp()),\n and_(Workout.start_time >= self.start_time,\n Workout.start_time < (self.start_time + datetime.timedelta(seconds=int(self.duration_sec))))))\\\n .filter(Workout.is_duplicate_with == None)\\\n .filter(Workout.manual_check_required_with == None)\\\n .all()\n\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of different sports -> set manual_check_required_with\n for duplicate in duplicates:\n if duplicate.sport_id != self.sport_id:\n self.manual_check_required_with = duplicate.id\n logger.debug(\"dup check - workout marked to be checked: {}\".format(duplicate))\n duplicates.remove(duplicate)\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of same sports (they are duplicate workouts) -> now find the leading workout\n leading_workout = None\n # Step 1: if one of the duplicates is a previously merged one, use it as the leading workout\n for duplicate in duplicates:\n if duplicate.source and duplicate.source == \"MERGED WORKOUT\":\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 1: {}\".format(leading_workout))\n break\n # Step 2: else if one of the duplicates is from Zwift, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.name and \"Zwift\" in duplicate.name:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 2: {}\".format(leading_workout))\n break\n # Step 3: else if one of the duplicates is a Garmin import, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.source and \"Garmin\" in duplicate.source:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 3: {}\".format(leading_workout))\n break\n # Step 
4: else use this workout as the leading workout\n if not leading_workout:\n leading_workout = self\n logger.debug(\"Found leading workout in step 4: {}\".format(leading_workout))\n\n # create a new workout that will be treated as the leading one. Mark the duplicates \n if leading_workout.source == \"MERGED WORKOUT\":\n merged_workout = leading_workout\n else:\n merged_workout = Workout(source=\"MERGED WORKOUT\", external_id=datetime.datetime.now().timestamp())\n number_of_merged += 1\n merged_workout._merge_attributes(leading_workout)\n logger.debug(\"dup check - merged workout with leading: {}\".format(merged_workout))\n merged_workout.add(database)\n leading_workout.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n\n for duplicate in duplicates:\n if duplicate is leading_workout:\n # already merged above\n continue\n if duplicate.is_duplicate_with == merged_workout.id:\n # already merged\n continue\n merged_workout._merge_attributes(duplicate)\n logger.debug(\"dup check - merged workout duplicate: {}\".format(merged_workout))\n duplicate.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n logger.debug(\"dup check - duplicate workout marked: {}\".format(duplicate))\n\n return (number_of_duplicates, number_of_merged)", "def filter_dups(saved_home, dups_info_home):\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_context_for_corenlp.txt'),\n encoding='utf-8')\n context_lines = orig_context_file.readlines()\n orig_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_keyword_for_corenlp.txt'),\n encoding='utf-8')\n allkeys_lines = orig_allkeys_file.readlines()\n assert len(context_lines) == len(allkeys_lines)\n\n # filter out the duplicates in the validation and the testing datasets and the kp20k training dataset itself\n dups_info_datasets = ['kp20k_training', 'kp20k_validation', 'kp20k_testing',\n 'inspec_testing', 'krapivin_testing',\n 'nus_testing', 'semeval_testing']\n total_filtered_idx_set = set()\n for dataset in dups_info_datasets:\n filtered_idx_set = set()\n dups_info_file = open(\n os.path.join(dups_info_home, '{}_context_nstpws_dups_w_kp20k_training.txt'.format(dataset)), encoding='utf-8')\n for line in dups_info_file:\n line = line.strip()\n # inspec_testing_48 kp20k_training_433051 jc_sc:0.7368; affine invariants of convex polygons | affine invariants of convex polygons\n dups, titles = line.split(';')\n src_dup, filtered_dup, _ = dups.split()\n src_idx = int(src_dup.strip().split('_')[-1])\n filtered_idx = int(filtered_dup.strip().split('_')[-1])\n if dataset != 'kp20k_training':\n filtered_idx_set.add(filtered_idx)\n else:\n if src_idx not in filtered_idx_set:\n filtered_idx_set.add(filtered_idx)\n total_filtered_idx_set = total_filtered_idx_set.union(filtered_idx_set)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n # also filter out the invalid data samples\n print('Finding the invalid data samples in the original kp20k training ...')\n for corpus_idx in tqdm(range(len(context_lines))):\n if context_lines[corpus_idx].strip().split() == [''] or allkeys_lines[corpus_idx].strip().split(' ; ') == ['']:\n total_filtered_idx_set.add(corpus_idx)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n total_filtered_idxes = sorted(list(total_filtered_idx_set))\n for filter_idx in total_filtered_idxes:\n context_lines[filter_idx] = '\\n'\n allkeys_lines[filter_idx] = '\\n'\n\n filtered_context_file = 
open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_context_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_context_file.writelines(context_lines)\n\n filtered_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_keyword_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_allkeys_file.writelines(allkeys_lines)\n\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_filtered_for_corenlp_idxes.txt'),\n 'w', encoding='utf-8')\n orig_context_file.write(' '.join([str(idx) for idx in total_filtered_idxes]) + '\\n')\n orig_context_file.write(str(len(total_filtered_idxes)) + '\\n')", "def find_duplicate(student_list):\r\n place_holder = student_info('null', 'null', '0', '0')\r\n current = place_holder\r\n dupe = []\r\n final = []\r\n for student in student_list:\r\n previous = current\r\n current = student\r\n if current.first == previous.first:\r\n if previous in final:\r\n dupe.append(final.pop())\r\n dupe.append(student)\r\n elif current.first != previous.first:\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n final.append(student_dupe)\r\n final.append(student)\r\n dupe = []\r\n else:\r\n final.append(student)\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n final.append(student_dupe)\r\n for student_final in final:\r\n print(student_format(student_final))", "def find_duplicates(lst):\n \"*** YOUR CODE HERE ***\"\n return len( set(lst) ) != len(lst)", "def list_dups(exproot, **kwargs):\n seen_args = []\n seen_names = []\n for jobname, args, results in load_all(exproot):\n if args in seen_args:\n print jobname, 'is dup of', seen_names[seen_args.index(args)]\n elif args != None:\n seen_args.append(args)\n seen_names.append(jobname)", "def test_identify_duplicates_6(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def test_identify_duplicates_1(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"L5\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def duplicates_marked_reciprocally():\n ids = FRAMEWORKS_DF['CURATED-COFs ID'].str\n messages = []\n\n for _index, row in FRAMEWORKS_DF.iterrows():\n if row['Duplicate found'] != 'none':\n original_id = row['CURATED-COFs ID']\n duplicate_id = row['Duplicate found']\n duplicate_row = FRAMEWORKS_DF.loc[FRAMEWORKS_DF['CURATED-COFs ID'] == duplicate_id ]\n if not len(duplicate_row) == 1:\n messages.append(f'Found row without reciprocal duplicate mark:\\n{row}')\n\n duplicate_row_original_id = duplicate_row['Duplicate found'].values[0]\n if not duplicate_row['Duplicate 
found'].values[0] == original_id:\n messages.append(f'Duplicate row lists ID {duplicate_row_original_id}, expected {original_id}')\n\n if messages:\n print('\\n'.join(messages))\n sys.exit(1)\n\n print('Rows marked as duplicates go both ways.')", "def test_identify_duplicates_3(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"L5\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def test_duplicated_gaitid(self):\n idaa_index = 6\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertTrue(upload_program.has_discrepancy('duplicate_gaitid'))", "def test_identify_duplicates_2(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def test_identify_duplicates_4(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def duplicates(deleteFlag=False,cnst='1'):\n output = db.query(['fwid','strjob'],cnst) # list of (fwid,strjob) pairs\n rptDict={} # dictionary for repeat values (key = first fwid, value = list of duplicates)\n for fwid,strjob in output:\n for f,s in output: # double-FOR loop\n if f is None: print 'NONE FWID??? ',f,s # hopefully this isn't flagged\n if strjob == s and f!=fwid: # condition for duplicate job\n if fwid not in list(itertools.chain.from_iterable(rptDict.values())): \n if fwid in rptDict.keys(): rptDict[fwid].append(f) # add to the list\n else: rptDict[fwid] = [f] # create a key,value pair\n print 'FWIDs with equal strjob entries: \\n',abbreviateDict(rptDict) # summarize results\n if deleteFlag:\n delfws = list(itertools.chain.from_iterable(rptDict.values()))\n if ask('Are you sure you want to delete %d duplicates?'%len(delfws)):\n for f in delfws: delete('fwid = %d'%f,False)", "def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! 
<---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! <---\")\n\n self.chromosomes = chromosomes", "def check(self):\n if not self.session:\n print(\"no database\")\n\n number_of_checked_workouts = 0\n number_of_merged_workouts = 0\n number_of_duplicate_workouts = 0\n workouts = self.session.query(Workout).all()\n for workout in workouts:\n number_of_checked_workouts += 1\n if workout.is_duplicate_with:\n number_of_duplicate_workouts += 1\n else:\n (a, b) = workout.handle_duplicates(self)\n number_of_duplicate_workouts += a\n number_of_merged_workouts += b\n logger.info('{} workouts checked, {} of them were duplicate, created {} merged workouts'\\\n .format(number_of_checked_workouts,\n number_of_duplicate_workouts,\n number_of_merged_workouts,))", "def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok", "def listDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[~ok]", "def find_duplicates(path, extension):\n files = list_all_files_with_extension(path, extension)\n result = set()\n duplicates = set()\n for file in files:\n if file in result:\n print(\"duplicate\")\n print(file)\n duplicates.add(file)\n else:\n result.add(file)\n return duplicates", "def dupable_matches_required(self):\n return 2", "def isduplicate(a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio()\n return refs.eq(e1, e2)", "def isduplicate(self, a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio(similarity=self.similarity)\n return refs.eq(e1, e2)", "def findDuplicates(self, nums):\n nums = sorted(nums)\n ans = []\n i = 0\n while i < len(nums) - 1:\n if nums[i] == nums[i + 1]:\n ans.append(nums[i])\n i += 2\n else:\n i += 1\n\n return ans", "def for_duplicates(self):\n print('++++++++++++ Duplicates Check Start+++++++++++++')\n print('Report for:', self.name)\n if not self.df.empty:\n for column in self.df.columns:\n if self.df.duplicated(column).sum() > 0:\n print('Duplicates found in: ', column)\n else:\n print('No duplicates found in: ', column)\n else:\n print('Empty data set')\n print('++++++++++++ Duplicates Check End+++++++++++++')", "def find_duplicate_game_docs(self):\n gids = self._db.Games.aggregate([{'$group':\n {'_id' : '$gid',\n 'count' : {'$sum' : 1}}},\n {'$match':\n {'count' : {'$gt' : 1}}}])\n return [x['_id'] for x in gids]", "def remove_duplicates_badSolution( li ):\n newli=[]\n seen = set()\n for item in li:\n if item not in seen:\n seen.add( item )\n newli.append(item)\n\n return newli", "def find_the_duplicate(nums):\n # frequency = {}\n\n # for num in nums:\n # frequency[num] = frequency.get(num, 0) + 1\n\n # for num in frequency:\n # if frequency[num] == 2:\n # return num\n\n ##########\n\n # nums_dict = list(enumerate(sorted(nums)))\n\n # for i, num in nums_dict:\n # if num == nums_dict[i + 1]:\n # return num\n\n ##################\n\n seen = set()\n\n for num in nums:\n if num in seen:\n return num\n seen.add(num)", "def check_no_duplicates(examples):\n return len(examples) == len(set(examples))", "def 
subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n n = len(nums)\n ans, res = [], []\n\n for i in range(2**n, 2**(n+1)):\n # generate bitmask, from 0..00 to 1..11\n bitmask = bin(i)[3:]\n res = [nums[j] for j in range(n) if bitmask[j] == '1']\n if res not in ans:\n ans.append(res)\n\n return ans\n # print(ans)", "def get_duplicates(input_list):\n size = len(input_list)\n duplicates = list()\n for i in range(size):\n k = i + 1\n for j in range(k, size):\n if input_list[i] == input_list[j] and input_list[i] not in duplicates:\n duplicates.append(input_list[i])\n return duplicates", "def check_image_for_duplicates(original_image):\n\n original_image_hash = get_average_hash(original_image)\n\n print(f'Checking for duplicate images for {original_image}')\n\n for potential_duplicate_image in images_in_directory:\n potential_duplicate_image_hash = get_average_hash(\n potential_duplicate_image)\n\n if ((original_image != potential_duplicate_image) & compare_image_hashes(original_image_hash, potential_duplicate_image_hash)):\n return potential_duplicate_image\n\n pass", "def findDuplicates2(self, nums):\n ans = []\n elem2count = {}\n for num in nums:\n elem2count[num] = elem2count.get(num, 0) + 1\n if elem2count[num] == 2:\n ans.append(num)\n\n return ans", "def containsDuplicateSet(self, nums):\n distinct_nums = set()\n for number in nums:\n if number in distinct_nums:\n return True\n distinct_nums.add(number)\n return False", "def subsets_with_dup(s):\n r = [[]]\n for e in s:\n print 'r: %-55r e: %r' % (e,r)\n for x in r:\n a = sorted(x + [e])\n if not(a in r): r.append(a) \n return r", "def containsDuplicate(self, nums):\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n if nums[i] == nums[j]:\n return True\n return False", "def check_all_duplicates(duplicate_list):\n non_dupes = []\n for dupes in duplicate_list:\n if not check_duplicates(dupes):\n non_dupes.append(dupes)\n return non_dupes", "def identifyClones(psDf, min_quality_for_singletons=20, average_clone_scores=[], none_score_for_averaging=9.6):\n \n clonesDf = findClones(psDf,\n min_quality_for_singletons=min_quality_for_singletons,\n average_clone_scores=average_clone_scores,\n none_score_for_averaging=none_score_for_averaging)\n return clonesDf", "def crosscheck(submitted_wb, temp_dir):\n submitted_sheets = sorted(submitted_wb.sheets(), key=lambda x: x.name)\n\n temp_sheets = sorted([xlrd.open_workbook(os.path.join(temp_dir, temp_wb_path)).sheet_by_index(0) \\\n for temp_wb_path in os.listdir(temp_dir)], key=lambda x: x.name)\n\n # Remove duplicates\n sheet_name_list = [sheet.name for sheet in temp_sheets]\n for sheet_name in sheet_name_list:\n if sheet_name_list.count(sheet_name) > 1:\n print ('More than 1 {}'.format(sheet_name))\n sheet_name_list.remove(sheet_name)\n\n for sheet in submitted_sheets:\n if sheet.name == sheet_name:\n submitted_sheets.remove(sheet)\n\n for sheet in temp_sheets:\n if sheet.name == sheet_name:\n temp_sheets.remove(sheet)\n\n if len(temp_sheets) == 0:\n print ('No Temp Sheets')\n return False\n\n elif len(submitted_sheets) == 0:\n print ('No Submitted Sheets')\n return False\n\n for temp_sheet, submitted_sheet in zip(temp_sheets, submitted_sheets):\n for row in range(9, submitted_sheet.nrows):\n for col in range(submitted_sheet.ncols):\n if temp_sheet.cell_value(row, col) != submitted_sheet.cell_value(row, col):\n print (submitted_sheet.name,\n row+1,\n col+1,\n submitted_sheet.cell_value(row, col),\n temp_sheet.cell_value(row, col)\n )\n\n return False", "def 
geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs", "def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)", "def get_duplicates(input: List[str]) -> List[str]:\n\n output = input # Replace with your logic\n\n return output", "def is_duplicate(self):\n return bool(self.duplicated)", "def contains_duplicate_fast_set(self, nums: List[int]) -> bool:\n visited = set()\n for i in nums:\n if i in visited:\n return True\n visited.add(i)", "def contains_duplicate_full_slow_set(self, nums: List[int]) -> bool:\n return len(nums) != len(set(nums))", "def get_org_duplicates(dddb):\n try:\n dddb.execute(SELECT_NEW_OIDS)\n oid_list = dddb.fetchall()\n\n dup_orgs = list()\n\n for oid in oid_list:\n org_concept = {'oc_oid': int(oid[0]), 'duplicates': []}\n\n dddb.execute(SELECT_ORG_CONCEPT_NAME.format(oid[0]))\n\n org_concept['canon_name'] = dddb.fetchone()[0]\n\n if org_concept['oc_oid'] == -5:\n org_concept_short_name = \"'%california state association of counties%'\"\n elif org_concept['oc_oid'] == -6:\n org_concept_short_name = \"'%california district attorney%'\"\n elif org_concept['oc_oid'] == -12:\n org_concept_short_name = \"'%association of california water%'\"\n elif org_concept['oc_oid'] == -22:\n org_concept_short_name = \"'%department of education%'\"\n else:\n org_concept_short_name = ' '.join([word for word in org_concept['canon_name'].split(' ')[:2]])\n org_concept_short_name = \"'%\" + org_concept_short_name.strip() + \"%'\"\n\n print(org_concept_short_name)\n dddb.execute(SELECT_DUPLICATE_ORGS.format(oid[0], oid[0], oid[0], org_concept_short_name))\n\n duplicate_list = dddb.fetchall()\n for row in duplicate_list:\n org = {'oid': int(row[0]), 'name': row[1], 'is_abbreviation': 0}\n org = check_deletion(org_concept['canon_name'], org)\n org = identify_subchapter(org_concept['canon_name'], org)\n org_concept['duplicates'].append(org)\n\n dup_orgs.append(org_concept)\n\n return dup_orgs\n\n except MySQLdb.Error:\n print(traceback.format_exc())", "def test__get_duplicates(self):\n\n result = list_of_duplicates\n expected = [\n 'Fred',\n 'Sarah',\n 'Matthew',\n 'Joanna',\n 'Sam',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def remove_duplicate_on_name_adduct(self, peak_df):\n\n duplicate_df = peak_df[peak_df.sec_id.duplicated()]\n dup_ids = duplicate_df['sec_id'].values\n\n print(\"Looking for compounds that have already been chosen with the same name and adduct\")\n\n duplicates = peak_df['sec_id'].isin(dup_ids)\n all_duplicates = peak_df[duplicates]\n print (\"current duplicates are:\")\n display(all_duplicates)\n\n # Check if there are any of the same compounds already stored.\n\n # For each secondary_id\n for dupid in dup_ids:\n\n duplicates = peak_df[peak_df['sec_id'] == dupid]\n\n for index, row in duplicates.iterrows():\n\n name = row['compound']\n adduct = row['adduct']\n\n name_match = peak_df['compound'] == name\n adduct_match = peak_df['adduct'] == adduct\n no_duplicates = peak_df['sec_id'] != dupid\n\n matching_rows = 
peak_df[name_match & adduct_match & no_duplicates]\n\n if matching_rows.index.any():\n print(\"we have aready strored this compound/adduct ratio so dropping this\")\n display(matching_rows)\n peak_df = peak_df.drop(index)\n else:\n print(\"no matching row for \", name, adduct)\n\n return peak_df", "def findDuplicateWorkingFiles(self, initialList, curInfix, newInfix):\n Duplicate_List = []\n for fname in initialList:\n infixStream = iccs_apex.whatInfixIsStream(fname)\n if (infixStream == curInfix):\n prefixStream, postfixStream = string.split(fname, infixStream)\n A_File_Name = prefixStream + newInfix + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def remove_double_duplicates(self, peak_df):\n print (\"Checking for peaks with duplicate compounds that match (compound name/adduct) other duplicate peaks\")\n duplicate_df = peak_df[peak_df.sec_id.duplicated()]\n dup_ids = duplicate_df['sec_id'].values\n\n duplicates = peak_df['sec_id'].isin(dup_ids)\n all_duplicates = peak_df[duplicates]\n\n # if there are any duplicate names in the duplicate peaks then we have\n if any(all_duplicates['compound'].duplicated()):\n\n # Get the compound names\n dup_compounds = all_duplicates[all_duplicates['compound'].duplicated(keep=False)]\n print (\"`Duplicate peaks with duplicate compounds\")\n display(dup_compounds)\n sids = list(np.unique(dup_compounds['sec_id'].values))\n\n df_to_check = peak_df[peak_df['sec_id'].isin(sids)]\n name_rt_dict = {}\n\n for index, row in df_to_check.iterrows():\n name_rt_dict[index] = [row['compound'], row['rt']]\n\n print(name_rt_dict)\n\n keep_index = self.get_closest_rt_match(name_rt_dict)\n\n sec_id_chosen = peak_df.loc[keep_index, 'sec_id']\n\n dup_peaks = peak_df[peak_df[\"sec_id\"] == sec_id_chosen]\n dup_indexes = list(dup_peaks.index.values)\n\n peak_df = self.drop_duplicates(dup_indexes, keep_index, peak_df)\n\n return peak_df", "def has_duplicates(some_list):\n # This function will take a list and check is some element appears more than 1 time return True, If not return False.\n check = False\n for e in some_list:\n for i in range(len(some_list)):\n #print(\"{0:>3} {1:<3}\".format(f\"{e}\",f\"{some_list[i]}\"),end=\"\")\n if e == some_list[i] and some_list.index(e) != i:\n check = True\n else:\n pass\n #print(\"\")\n return check", "def check_against_all_others(progress_bar: Progress, prog_bar_task: int, golden_form: dict, form_data_list: list):\n duplicate_form_ids = list()\n final_dict = dict() # {GoldenFormID: ListOfDuplicateForms}\n field_skip_list = [\"_ID\"]\n\n # single_form_dedupe_task = progress_bar.add_task(f\"[magenta]Single Form DeDupe\", total=len(form_data_list))\n\n for count, test_form in enumerate(form_data_list):\n # Reset if the form is a Match or not each loop\n match = True\n if test_form.get(\"_ID\") == golden_form.get(\"_ID\"):\n # The golden form, we can skip deduplicating this one\n pass # progress_bar.console.log(f\"Golden Form\")\n else:\n # Not the golden form\n for form_field_key, form_field_value in test_form.items():\n if form_field_key in field_skip_list:\n pass\n else:\n if golden_form.get(form_field_key) == test_form.get(form_field_key):\n # Match!\n pass\n else:\n # No match!\n match = False\n break\n\n progress_bar.update(prog_bar_task, advance=1)", "def test_duplicates():\n\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"\"\" SELECT 
COUNT(CONCAT(song_title, ' ', artist_name)) \n FROM songs \"\"\")\n count1 = cur.fetchone()[0]\n cur.execute(\"\"\" SELECT COUNT(DISTINCT CONCAT(song_title, ' ', artist_name))\n FROM songs \"\"\")\n count2 = cur.fetchone()[0]\n assert count1-count2 == 0", "def findDuplicateReleaseFiles(self, initialList, workingTowerName, newInfix):\n Release_Tower_Name = self.getReleaseVersion(workingTowerName, newInfix)\n Duplicate_List = []\n for fname in initialList:\n prefixStream, postfixStream = string.split(fname, workingTowerName)\n A_File_Name = prefixStream + Release_Tower_Name + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def find_matching_pastes(self,raw_pastes):\n\n matching_pastes=dict()\n \n for key, rp in raw_pastes.items() :\n for rex in self.rexps :\n if rex.search(rp) :\n matching_pastes[key]=rp\n break \n return matching_pastes", "def check_duplicates(filename=None):\r\n i=0\r\n for line in open(filename):\r\n a=line.split(\" \")\r\n i=i+1\r\n b=set(a[:])\r\n for item in b:\r\n if a.count(item) > 1:\r\n print \"line\",i,\"---->\",item\r\n print \"end\"", "def find_duplicate_texts(self, name, text_key=None):\n if not text_key: text_key = self.text_key\n values = self._get_valuemap(name, text_key=text_key)\n dupes_check = []\n text_dupes = []\n for value in values:\n if value[1] in dupes_check:\n text_dupes.append(value[1])\n dupes_check.append(value[1])\n text_dupes = list(set(text_dupes))\n dupes = []\n for value in values:\n if value[1] in text_dupes:\n dupes.append(value)\n dupes = list(sorted(dupes, key=lambda x: x[1]))\n return dupes", "def generate_missing_and_dupes():\n missing = []\n possible_dupes = defaultdict(list)\n for a in Attachment.objects.all():\n path = a.attachment.path\n if not os.path.exists(path):\n missing.append(a)\n continue\n with open(path, 'rb') as f:\n hasher = hashlib.md5()\n hasher.update(f.read())\n file_hash = hasher.hexdigest()\n possible_dupes[file_hash].append(a)\n real_dupes = {k: v for k, v in possible_dupes.items() if len(v) > 1}\n return missing, real_dupes", "def submissions_against_submissions(submission_directory=os.path.join(os.getcwd(), \"submissions\"), threshold=.95, skip=[]):\n # Regular expressions.\n username_finder = re.compile(r\"(.*)_\\d+.code$\")\n\n for problem_folder in os.listdir(submission_directory):\n if problem_folder in skip:\n continue\n print(problem_folder)\n problem_directory = os.path.join(submission_directory, problem_folder)\n submissions = os.listdir(problem_directory)\n for submission in submissions:\n # Get the username associated with this submission.\n submission_user = username_finder.findall(submission)[0]\n submission_path = os.path.join(problem_directory, submission)\n for submission_other in submissions:\n # Check that the submissions are not made by the same user.\n if submission_user != username_finder.findall(submission_other)[0]:\n submission_other_path = os.path.join(problem_directory, submission_other)\n # Compare the submissions using SequenceMatcher.\n a = open(submission_path, \"r\")\n b = open(submission_other_path, \"r\")\n ratio = SequenceMatcher(None, a.read(), b.read()).ratio()\n if ratio >= threshold:\n print(\"\",submission, submission_other,ratio,sep=\"\\t\")\n a.close()\n b.close()\n # Pause between each problem. 
This can be removed in the future,\n # although I believe it makes it easier to see each individual problem.\n input(\"Press enter to continue to the next problem.\")", "def contains_duplicate(self, nums: List[int]) -> bool:\n if not nums:\n return\n\n nums.sort()\n\n if len(nums) == 1:\n return False\n\n for i in range(1, len(nums)):\n if nums[i - 1] == nums[i]:\n return True\n return False", "def test_duplicate_entries(self):", "def findDups(chromosome, nPos, totPlayers):\n duplicate = 1\n while (duplicate == 1):\n dupsFound = 0\n tempList = [player for player in chromosome]\n tempList.sort()\n last = -1\n for current in tempList:\n if current == last:\n chromosome = genRandTeam(nPos, totPlayers)\n dupsFound = 1\n break\n last = current\n if dupsFound == 0:\n return chromosome", "def list_duplicates(seq):\n # https://stackoverflow.com/questions/5419204\n tally = defaultdict(list)\n for i, item in enumerate(seq):\n tally[item].append(i)\n return ((key, locs) for key, locs in tally.items() if len(locs) > 1)", "def clean_duplicate_documents(self):\n title_k = lambda x: x.title\n for k, g in groupby(sorted(self.annotation_documents, key=title_k), title_k):\n g = list(g)\n if len(g) > 1:\n # check first if one is in test set\n to_remove = [x for x in g if x not in self.test]\n if (\n len(to_remove) > 1\n ): # if test is not matched, make subselection based on annotation unit count\n select_k = lambda x: (\n len(x.events) + len(x.sentiment_expressions),\n x.annotator_id != \"gilles\",\n )\n to_remove.sort(key=select_k, reverse=True)\n to_remove = to_remove[1:]\n for docrm in to_remove:\n self.annotation_documents.remove(docrm)\n if docrm in self.dev:\n self.dev.remove(docrm)\n elif docrm in self.test:\n self.test.remove(docrm)\n print(f\"Duplicate doc removed: {docrm}\")", "def duplicated(list):\n u, c = np.unique(list, return_counts=True)\n dup = u[c > 1]\n return dup", "def _compute_sims(self):\n no_duplicates = defaultdict(list)\n for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():\n duplicate = no_duplicates[num]\n for couples in duplicate:\n if (lineset1, idx1) in couples or (lineset2, idx2) in couples:\n couples.add((lineset1, idx1))\n couples.add((lineset2, idx2))\n break\n else:\n duplicate.append({(lineset1, idx1), (lineset2, idx2)})\n sims = []\n for num, ensembles in no_duplicates.items():\n for couples in ensembles:\n sims.append((num, couples))\n sims.sort()\n sims.reverse()\n return sims", "def check_bib_file_for_duplicates(bibfile):\n arxiv, doi = [], []\n arxiv_key, doi_key = [], []\n print(\"% Checking bib file {:s} for duplicates...\".format(bibfile))\n with open(bibfile, \"r\") as f:\n key = \"\"\n for line in f:\n t = line.split(\"@\")\n if len(t) > 1:\n key = t[1].split(\"{\")[1].split(\",\")[0]\n t = line.split(\"eprint = \")\n if len(t) > 1:\n arxiv.append(t[1].split(\",\\n\")[0][1:-1])\n arxiv_key.append(key)\n t = line.split(\"doi = \")\n if len(t) > 1:\n doi.append(t[1].split(\",\\n\")[0][1:-1])\n doi_key.append(key)\n u, c = np.unique(arxiv, return_counts=True)\n d_arxiv = u[c > 1]\n n_d_arxiv = len(d_arxiv)\n u, c = np.unique(doi, return_counts=True)\n d_doi = u[c > 1]\n n_d_doi = len(d_doi)\n if n_d_arxiv + n_d_doi > 0:\n print(\n \"% WARNING. 
{:d} duplicate arXiv ID(s) and {:d} duplicate DOI(s) detected!\".format(\n n_d_arxiv, n_d_doi\n )\n )\n print(\n \"% You need to fix the following equivalent keys for the unique IDs listed below:\"\n )\n print(\"ID | Keys\")\n if n_d_arxiv > 0:\n print_duplicates(d_arxiv, arxiv, arxiv_key)\n if n_d_doi > 0:\n print_duplicates(d_doi, doi, doi_key)\n else:\n print(\"% Done, no duplicates detected!\")", "def num_matches(students, samples):\n count = 0\n for i in range(samples):\n bday_list = gen_birthdays(students)\n if has_duplicates(bday_list):\n count += 1\n return count", "def FindDuplicates(seq):\n dup = set()\n seen = set()\n\n for item in seq:\n if item in seen:\n dup.add(item)\n else:\n seen.add(item)\n\n return list(dup)", "def get_duplicates(files):\n checksums = {}\n # Create a dictionary of files, keyed by their checksum\n for f in files:\n checksum = get_checksum(f)\n checksums.setdefault(checksum,[]).append(f)\n # Create a list of lists of duplicate files. Only file lists\n # with more than one item need apply.\n duplicates = []\n for csum, file_list in checksums.items():\n if len(file_list)>1:\n duplicates.append(file_list)\n return duplicates", "def check_duplicate(self, state):\n pass", "def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)", "def submissions_against_solutions(submission_directory=os.path.join(os.getcwd(), \"submissions\"), solution_directory=os.path.join(os.getcwd(), \"solutions\"), threshold=.95, skip=[]):\n for problem_folder in os.listdir(submission_directory):\n if problem_folder in skip:\n continue\n print(problem_folder)\n problem_directory = os.path.join(submission_directory, problem_folder)\n solution_problem_directory = os.path.join(solution_directory, problem_folder)\n for submission in os.listdir(problem_directory):\n submission_path = os.path.join(problem_directory, submission)\n for solution in os.listdir(solution_problem_directory):\n solution_path = os.path.join(solution_problem_directory, solution)\n # Compare the submissions using SequenceMatcher.\n a = open(submission_path, \"r\")\n b = open(solution_path, \"r\")\n ratio = SequenceMatcher(None, a.read(), b.read()).ratio()\n if ratio >= threshold:\n print(\"\",submission, solution,ratio,sep=\"\\t\")\n a.close()\n b.close()\n # Pause between each problem. 
This can be removed in the future,\n # although I believe it makes it easier to see each individual problem.\n input(\"Press enter to continue to the next problem.\")", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def tuple_has_duplicates(my_tuple):\n\n duplicates = []\n for i in my_tuple:\n if my_tuple.count(i) > 1 and i not in duplicates:\n duplicates.append(i)\n counter = 0\n # for k in my_tuple:\n # if i == k:\n # counter += 1\n # if counter > 1 and i not in duplicates:\n # duplicates.append(i)\n if duplicates:\n return duplicates\n else:\n return False", "def test_build_reference_dupes(self):\n items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n obs1, obs2 = build_reference(items, 3)\n self.assertEqual(len(obs1), 3)\n self.assertEqual(len(obs2), 7)\n #check that the ref and nonref are same\n finals = set([])\n for item in obs1:\n if item in finals:\n raise AssertionError(\"Duplicate in reference!\")\n finals.add(item)\n for item in obs2:\n if item in finals:\n raise AssertionError(\"Duplicate in nonreference!\")\n finals.add(item)", "def markDuplicates(in_data):\n\tdata = sorted(in_data)\n\tmarked = [data[0]]\n\tfor i in range(1, len(data)):\n\t\tinstance = data[i]\n\t\tif instance == data[i-1]:\n\t\t\tmarked.append(instance + ['duplicate'])\n\t\telse:\n\t\t\tmarked.append(instance)\n\treturn marked", "def answer(codes):\n s = set()\n num_distinct_codes = 0\n for code in codes:\n if code in s:\n continue\n elif is_palindrome(code):\n s.add(code)\n else:\n s.add(code)\n s.add(code[::-1])\n num_distinct_codes += 1\n return num_distinct_codes", "def check_sparkdf_find_dupes(sparkdf,columns):\n\n\treturn sparkdf.groupBy(columns).count().where('count>1').sort('count', ascending=False)", "def picard_mark_duplicates(self, bam_in, files_out):\n self.cmd(\"{picard_cmd}/MarkDuplicates.jar \\\n INPUT='{bam_in}' \\\n OUTPUT='/dev/stdout' \\\n METRICS_FILE={metricfile_out} \\\n REMOVE_DUPLICATES=false ASSUME_SORTED=true COMPRESSION_LEVEL=5 \\\n VALIDATION_STRINGENCY=LENIENT \\\n MAX_RECORDS_IN_RAM=5000000 \\\n CREATE_INDEX=false \\\n TMP_DIR={local_temp_dir} \\\n > {bam_out}\"\n .format(\n picard_cmd=self.cmds[\"picard\"],\n bam_in=bam_in,\n bam_out=files_out[0],\n metricfile_out=files_out[1],\n local_temp_dir=self.local_temp_dir\n ),\n on_error=lambda: self.create_error_file(files_out),\n checkpoint_file=files_out[0],\n shell=True)\n\n self.checkpoint(files_out[1])\n\n if self.remove_intermediate:\n self.rm(bam_in)", "def list_duplicates(seq):\n tally = defaultdict(list)\n for i, item in enumerate(seq):\n try:\n if item.mask == True:\n continue\n except:\n tally[item].append(i)\n return ((key, locs) for key, locs in tally.items() if len(locs) > 1)", "def duplicated_code():\n author_ids = []\n updated_books = []\n updated_authors = []\n \n for author in mongo.db.authors.find():\n updated_authors.append(author)\n for book in mongo.db.books.find():\n \n # Create a new key/value pair in each book for author_name\n # by looking up the author_id and matching it to the author_name\n # of the selected author_id.\n \n book_title = book['title']\n author_id = book['author_id']\n \n for author in updated_authors:\n if author['_id'] == ObjectId(author_id):\n book['author_name'] = author['author_name']\n \n \n # Using the googlebooks API search for each book and retrieve\n # a thumbnail of the book.\n \n google_api_title = book_title.replace(' ', '+')\n book_isbn_num = book['isbn_num']\n with urllib.request.urlopen(BASE_API_LINK + google_api_title) as f:\n text = f.read()\n 
decoded_text = text.decode(\"utf-8\")\n obj = json.loads(decoded_text) \n google_book_obj = obj[\"items\"][0]\n book_href = google_book_obj['volumeInfo']\n if 'imageLinks' in book_href:\n book['href'] = book_href['imageLinks']['thumbnail']\n \n # Append book to new book dictionary.\n updated_books.append(book)\n \n return updated_books", "def isduplicate(self, a, b):\n open(self.mybib, 'w').write(a)\n open(self.otherbib, 'w').write(b)\n res = sp.call('papers add {} --bibtex {} --update-key --mode r --debug'.format(self.otherbib, self.mybib), shell=True)\n return res != 0", "def final_dup_check(cat):\n # Enforce chronological order\n cat.events.sort(key=lambda x: x.preferred_origin().time)\n dups = []\n others = []\n # Loop through and determine which of dups is detection and which is\n # template. Remove detection.\n for i, ev in enumerate(cat):\n if ev.preferred_origin().time - cat[i-1].preferred_origin().time < 2.:\n # Which is which\n if ev.creation_info.author == 'EQcorrscan':\n dups.append(ev)\n others.append(cat[i-1])\n print('Other event author: {}'.format(\n cat[i-1].creation_info.author))\n elif cat[i-1].creation_info.author == 'EQcorrscan':\n dups.append(cat[i-1])\n others.append(ev)\n print('Other event author: {}'.format(\n ev.creation_info.author))\n else:\n print('Neither')\n return dups, others", "def count_dups(self):\n command = \"SELECT COUNT(path) FROM files WHERE checksum IN (SELECT\\\n checksum FROM files GROUP BY checksum HAVING\\\n COUNT(checksum) >1) ORDER BY checksum;\"\n return self.c.execute(command)", "def get_unique_snps(self):\n\n for chromosome in self.snpsites.keys():\n\n for position in self.snpsites[chromosome].keys():\n for filenumber in range(len(self.vcffilenames)):\n\n if (\n self.snpsites[chromosome][position][filenumber] == True\n and sum(self.snpsites[chromosome][position]) == 1\n ): # First any(array) finds\n self.snp_positions[self.vcffilenames[filenumber]][chromosome][\n position\n ].update({\"unique\": True})\n elif (\n sum(self.snpsites[chromosome][position]) >= 2\n ): # there might be snp at same position but with different alt base\n\n snp_index = [\n i\n for i, j in enumerate(self.snpsites[chromosome][position])\n if j == True\n ]\n\n totalindex = len(snp_index)\n # Lets check the alt base in these vcf files using index\n # lets get array of alt bases from each file\n alt_snps = []\n for index in snp_index:\n alt_snps.append(\n self.snp_positions[self.vcffilenames[index]][\n chromosome\n ][position][\"alt\"]\n )\n\n # get the counts of the elements\n\n counts = self.count_list_elements_occurrences(alt_snps)\n\n for index in range(len(counts)):\n if counts[index] == 1:\n # this is unique, so occurred once\n self.snp_positions[self.vcffilenames[snp_index[index]]][\n chromosome\n ][position].update(\n {\"unique\": True}\n ) # vcffilenames[snp_index[index]] = this will be the filename\n # print(\"this is unique\", vcffilenames[snp_index[index]], chromosome, position, self.snp_positions[vcffilenames[snp_index[index]]][chromosome][position])\n\n # else:\n # \tvcf_database[\"self.snp_positions\"][chromosome + \"_\" + position].update({\"unique\":False})\n\n return", "def test_teacher_check_homework_raises_homework_repeat_error_if_same_solution_was_already_submitted():\n with pytest.raises(HomeworkRepeatError):\n opp_teacher.check_homework(result_1)\n advanced_python_teacher.check_homework(result_1)\n Teacher.reset_results(oop_hw)", "def duplicates(ls: list):\n\n seen = set([])\n dups = set([])\n\n for x in ls:\n if x in seen:\n dups.add(x)\n 
else:\n seen.add(x)\n\n return dups", "def similarparts(imagparts):\n dupl = []\n global opt\n l = len(imagparts[0])-1\n \n for i in range(len(imagparts)-1): \n difs = sum(abs(x-y) for x,y in zip(imagparts[i][:l],imagparts[i+1][:l]))\n mean = float(sum(imagparts[i][:l])) / l\n dev = float(sum(abs(mean-val) for val in imagparts[i][:l])) / l\n if dev/mean >= float(opt.blcoldev):\n if difs <= int(opt.blsim):\n if imagparts[i] not in dupl:\n dupl.append(imagparts[i])\n if imagparts[i+1] not in dupl:\n dupl.append(imagparts[i+1])\n\n return dupl", "def duplicates(items):\n duplicate_items = set()\n for item in items:\n if items.count(item) > 1:\n duplicate_items.add(item)\n duplicate_list = list(duplicate_items)\n\n\n return sorted(duplicate_list)", "def is_duplicates(trajs):\n if len(trajs) < 2:\n return False \n for j in range(len(trajs)-1):\n for i in range(j+1, len(trajs)):\n R = (trajs[i].get_slice()[:,:2]==trajs[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass\n return False", "def has_duplicates(l):\r\n return len(set(l)) < len(l)", "def dupCheck(doc, col):\n\tdocList = list()\n\twith open(doc) as f:\n\t\tfor l in f.readlines():\n\t\t\tname = l.split(',')\n\t\t\tdocList.append(name[col])\n\t\tif len(docList) != len(set(docList)):\n\t\t\tprint(\"Duplicates Detected\")", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def containsDuplciateOptimized(self, nums):\n nums = sorted(nums)\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n return True\n return False", "def _compute_unique_approval_scores(self, profile: list[set[int]]) -> list[int]:\n unique_approval_scores = np.zeros(self.m, dtype=int)\n for party in range(0, self.m):\n for ballot in profile:\n if ballot == {party}:\n unique_approval_scores[party] += 1\n return list(unique_approval_scores)", "def unique_contigs_are_unique(scaffold_list, unique_contigs_list):\n i= 0\n old_scaffold_list = copy.deepcopy(scaffold_list)\n old_scaffold_list = purge_redundancy(old_scaffold_list)\n new_scaffold_list = []\n while new_scaffold_list != old_scaffold_list and i < 20:\n \n i += 1\n if i != 1: \n old_scaffold_list = copy.deepcopy(new_scaffold_list)\n #new list is now old list\n new_scaffold_list = new_resolve_unique_contigs(old_scaffold_list, unique_contigs_list) \n new_scaffold_list = purge_redundancy(new_scaffold_list)\n\n return new_scaffold_list", "def _findSamesetProteins(protToPeps, proteins=None):\n proteins = viewkeys(protToPeps) if proteins is None else proteins\n\n equalEvidence = ddict(set)\n for protein in proteins:\n peptides = protToPeps[protein]\n equalEvidence[tuple(sorted(peptides))].add(protein)\n equalProteins = list()\n for proteins in viewvalues(equalEvidence):\n if len(proteins) > 1:\n equalProteins.append(tuple(sorted(proteins)))\n return 
equalProteins", "def get_duplicate_rows(df):\n\treturn df.duplicated().sum()", "def removeduplicates(facts: List[FHIRObservationFact]) -> List[FHIRObservationFact]:\n fact_keys: List[Tuple[int, str, str]] = []\n rval: List[FHIRObservationFact] = []\n for fact in facts:\n k = (fact.instance_num, fact.concept_cd, fact.modifier_cd)\n if k not in fact_keys:\n fact_keys.append(k)\n rval.append(fact)\n return rval" ]
[ "0.6404877", "0.6166872", "0.58831835", "0.5859948", "0.58292764", "0.58159447", "0.57876647", "0.5782457", "0.5769974", "0.5762715", "0.5746975", "0.5736089", "0.5733144", "0.57238513", "0.5710152", "0.5654848", "0.553705", "0.5500801", "0.54478735", "0.5438873", "0.54368997", "0.54306716", "0.54144025", "0.54073423", "0.53861696", "0.53712606", "0.5346199", "0.53129274", "0.5275559", "0.5272303", "0.52654654", "0.5253832", "0.52518404", "0.52423084", "0.5240281", "0.52258253", "0.52256054", "0.52118677", "0.5202786", "0.52010465", "0.51914126", "0.5180465", "0.51748914", "0.51700175", "0.5169648", "0.5169234", "0.51667815", "0.5164773", "0.5157722", "0.5133502", "0.5126463", "0.5119363", "0.51187176", "0.51108956", "0.5110408", "0.5109832", "0.51076996", "0.50984716", "0.5097678", "0.5094511", "0.5091332", "0.50904655", "0.50869375", "0.5078267", "0.5077857", "0.50686854", "0.5067229", "0.5054262", "0.5041183", "0.50396544", "0.5038362", "0.5032726", "0.5024741", "0.501954", "0.5010165", "0.5007848", "0.50041324", "0.50022525", "0.49990454", "0.49948704", "0.49916032", "0.4988013", "0.49817118", "0.49812546", "0.49807405", "0.49777484", "0.4976205", "0.49662265", "0.49650034", "0.49635476", "0.49535814", "0.4949313", "0.4943261", "0.4935545", "0.49355334", "0.49325898", "0.49281418", "0.49267897", "0.49233982", "0.49214882" ]
0.753223
0
Display the duplication check results.
def display_dup(dup_result):
    lines = [k + ": " + ", ".join(v) for k, v in dup_result]
    return lines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def for_duplicates(self):\n print('++++++++++++ Duplicates Check Start+++++++++++++')\n print('Report for:', self.name)\n if not self.df.empty:\n for column in self.df.columns:\n if self.df.duplicated(column).sum() > 0:\n print('Duplicates found in: ', column)\n else:\n print('No duplicates found in: ', column)\n else:\n print('Empty data set')\n print('++++++++++++ Duplicates Check End+++++++++++++')", "def _display_sims(self, sims):\n nb_lignes_dupliquees = 0\n for num, couples in sims:\n print()\n print(num, \"similar lines in\", len(couples), \"files\")\n couples = sorted(couples)\n lineset = idx = None\n for lineset, idx in couples:\n print(\"==%s:%s\" % (lineset.name, idx))\n if lineset:\n for line in lineset._real_lines[idx : idx + num]:\n print(\" \", line.rstrip())\n nb_lignes_dupliquees += num * (len(couples) - 1)\n nb_total_lignes = sum([len(lineset) for lineset in self.linesets])\n print(\n \"TOTAL lines=%s duplicates=%s percent=%.2f\"\n % (\n nb_total_lignes,\n nb_lignes_dupliquees,\n nb_lignes_dupliquees * 100.0 / nb_total_lignes,\n )\n )", "def test_display_name(self):\r\n def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)\r\n duplicated_item = self.get_item_from_modulestore(usage_key, draft=True)\r\n self.assertEqual(duplicated_item.display_name, expected_name)\r\n return usage_key\r\n\r\n # Display name comes from template.\r\n dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, \"Duplicate of 'Multiple Choice'\")\r\n # Test dupe of dupe.\r\n verify_name(dupe_usage_key, self.seq_usage_key, \"Duplicate of 'Duplicate of 'Multiple Choice''\")\r\n\r\n # Uses default display_name of 'Text' from HTML component.\r\n verify_name(self.html_usage_key, self.seq_usage_key, \"Duplicate of 'Text'\")\r\n\r\n # The sequence does not have a display_name set, so category is shown.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"Duplicate of sequential\")\r\n\r\n # Now send a custom display name for the duplicate.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"customized name\", display_name=\"customized name\")", "def _render_results_scan_summary(self):\n\n core.add_text(\n 'Scan Summary',\n color=self._control_text_color,\n parent=self._window_name)\n\n core.add_text(\n 'Number of images scanned: ',\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n name='number_of_scanned_images_text',\n source=NUMBER_OF_SCANNED_IMAGES,\n parent=self._window_name)\n\n core.add_text(\n 'Number duplicate image sets: ',\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n str(len(self._duplicates_list)),\n parent=self._window_name)\n\n core.add_text('', parent=self._window_name)", "def print_duplicates(md):\n for digest,paths in md.iteritems():\n for p in paths:\n print digest, p\n # print blank line between groups\n print \"\"", "def print_results(self):\n pass", "def display_results():\n pass", "def showClusters(self,clusterOfFiles,batchSize=3):\n #groupCounter keeps track of how many clusters of duplicate files has been printed\n clusterCounter=0\n for acluster in clusterOfFiles:\n #print a cluster/group of duplicate files\n print(\"Duplicate group {0}\".format(clusterCounter+1))\n print (\"All of these files have the same content:\")\n for afile in acluster:\n print(afile)\n \n #increase the groupCounter by 1 as one group has been printed\n 
clusterCounter+=1\n if clusterCounter%batchSize==0:\n raw_input(\"Press any key for more duplicates\")", "def check(self):\n if not self.session:\n print(\"no database\")\n\n number_of_checked_workouts = 0\n number_of_merged_workouts = 0\n number_of_duplicate_workouts = 0\n workouts = self.session.query(Workout).all()\n for workout in workouts:\n number_of_checked_workouts += 1\n if workout.is_duplicate_with:\n number_of_duplicate_workouts += 1\n else:\n (a, b) = workout.handle_duplicates(self)\n number_of_duplicate_workouts += a\n number_of_merged_workouts += b\n logger.info('{} workouts checked, {} of them were duplicate, created {} merged workouts'\\\n .format(number_of_checked_workouts,\n number_of_duplicate_workouts,\n number_of_merged_workouts,))", "def _render_torrents(ctx, torrent_iterator, format):\n\n show_duplicates = ctx.params.get('show_duplicates', False)\n result_count = ctx.params.get('results', 25)\n\n (seen, count,) = (set(), 0,)\n while count < result_count:\n try:\n torrent = next(torrent_iterator)\n if torrent['hash'] not in seen:\n rendered = format.format(**COLORED, **torrent)\n if not show_duplicates:\n seen.add(torrent['hash'])\n\n yield (torrent, rendered,)\n count += 1\n except StopIteration:\n break\n\n if count <= 0:\n print((\n '{style.BOLD}{fore.WHITE}sorry, no results{style.RESET}'\n ).format(**COLORED))\n return", "def print_not_uniq(list, mesg=\"{multiplicity} times: {item}\"):\n\n not_uniq = search_not_uniq(list)\n for item, multiplicity in not_uniq.items():\n print(mesg.format(**locals()))\n return len(not_uniq)", "def test__get_duplicates(self):\n\n result = list_of_duplicates\n expected = [\n 'Fred',\n 'Sarah',\n 'Matthew',\n 'Joanna',\n 'Sam',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()", "def show_results(self):\r\n\r\n if self.player_cards > self.computer_cards: # player wins\r\n print('\\nCongratulations!!')\r\n print('You WIN by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n elif self.player_cards < self.computer_cards: # computer wins\r\n print('\\nToo bad!!')\r\n print('You LOST by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n else: # tied\r\n print('You TIED by {0} / {1}'.format(self.player_cards, self.computer_cards))", "def _show_results(results):\n if len(results) == 0:\n click.echo(\"Could not find any command with these parameters.\")\n else:\n for kw, result in results.items():\n click.echo(kw.upper())\n for dic in result:\n if dic[\"explanation\"] != \"\":\n click.echo(\"\\t#%i\\t%s \\n\\t%s\" %(dic[\"id\"], dic[\"command\"], dic[\"explanation\"]))\n else:\n click.echo(\"\\t#%i\\t%s\" % (dic[\"id\"], dic[\"command\"]))", "def duplicates_marked_reciprocally():\n ids = FRAMEWORKS_DF['CURATED-COFs ID'].str\n messages = []\n\n for _index, row in FRAMEWORKS_DF.iterrows():\n if row['Duplicate found'] != 'none':\n original_id = row['CURATED-COFs ID']\n duplicate_id = row['Duplicate found']\n duplicate_row = FRAMEWORKS_DF.loc[FRAMEWORKS_DF['CURATED-COFs ID'] == duplicate_id ]\n if not len(duplicate_row) == 1:\n messages.append(f'Found row without reciprocal duplicate mark:\\n{row}')\n\n duplicate_row_original_id = duplicate_row['Duplicate found'].values[0]\n if not duplicate_row['Duplicate found'].values[0] == 
original_id:\n messages.append(f'Duplicate row lists ID {duplicate_row_original_id}, expected {original_id}')\n\n if messages:\n print('\\n'.join(messages))\n sys.exit(1)\n\n print('Rows marked as duplicates go both ways.')", "def test_duplicate_entries(self):", "def duplicate_record_check(cur):\n # get all created tables from db\n cur.execute(\"SELECT * FROM information_schema.tables WHERE table_schema='public'\")\n result = cur.fetchall()\n\n # create list of tables\n table_list = [table[2] for table in result]\n\n print('Checking tables for duplicate records...')\n\n # check each table for duplicates\n for table_name in table_list:\n cur.execute(f\"SELECT COUNT(*) FROM {table_name}\")\n row_count = cur.fetchall()\n cur.execute(f\"SELECT DISTINCT COUNT(*) FROM {table_name}\")\n distinct_count = cur.fetchall()\n if row_count[0][0] == distinct_count[0][0]:\n print(f\"GREAT, no duplicate records found in {table_name}!\")\n elif distinct_count[0][0] < row_count[0][0]:\n print(f\"WARNING, duplicate records found! {distinct_count[0][0]}\"\n f\"distinct record count is less than total record count of {row_count[0][0]}\")", "def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()", "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0", "def display_results_for_errors(result):\n i = 0\n for r in result:\n print('\\t'+str(result[i][0])+' ---> '+str(result[i][1])+' %\\n')\n i = i + 1", "def display_results(results):\n winners = results[0]\n losers = results[1]\n pushers = results[2]\n blackjack_winners = results[3]\n print(generate_results_string(winners, \" wins\", \" win\"))\n print(generate_results_string(losers, \" loses\", \" lose\"))\n print(generate_results_string(pushers, \" pushes\", \" push\"))\n print(generate_results_string(blackjack_winners, \" wins with blackjack\", \" win with blackjack\"))", "def search_results(self, results):\n for index, item in enumerate(results):\n print '[%s] %s (%s) {%s}' % (\n index, \n self._color(item.title), \n self._color(item.year, 'RED'), \n self._color(item.imdbid, 'GREEN'))", "def show_result(dict_result):\r\n\r\n\tcorrects = dict_result[\"Corrects\"]\r\n\twrongs = dict_result[\"Wrongs\"]\r\n\tn_questions = dict_result[\"n_questions\"]\r\n\r\n\tprint(\"\\n\\n\",\"-\"*10,\"Final Result\", \"-\"*10)\r\n\r\n\tfinal_note = (len(corrects)*100)/n_questions\r\n\tprint(\"\\nResult: \", final_note*10)\r\n\r\n\tif final_note*10 > 600:\r\n\t\tprint(\"\\nYOU PASS!\")\r\n\telse:\r\n\t\tprint(\"\\nI'm sorry, you don't pass, but please try again!\")\r\n\r\n\tif len(wrongs) > 0:\r\n\t\tprint(\"\\nSome questions for review:\", end=\" \")\r\n\t\tfor i in wrongs:\r\n\t\t\tif i == wrongs[-1]:\r\n\t\t\t\tprint(i)\r\n\t\t\telse:\r\n\t\t\t\tprint(i, end=\", \")", "def _display_dns_results(self):\n if self.check_valid_result_data(\"dns_results\", silent=True):\n nb_markdown(f\"DNS events related to {self.url}\", \"bold\")\n display(self._last_result.dns_results)\n else:\n nb_markdown(f\"No DNS resolutions found for {self.url}\")", "def 
test_check_bc_duplicates_default_dups(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = ['Duplicate barcode ACGT found.\\t1,1',\r\n 'Duplicate barcode ACGT found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def mark_duplicates():\n\n mkdir(MD_DIR)\n\n printp(\"\"\"# drmr:label mark-duplicates\\n\"\"\")\n printp(\"\"\"# drmr:job nodes=1 processors=1 memory=12g working_directory={} time_limit=8h\"\"\".format(MD_DIR))\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n srr = get_srr(sample) if x == 'treatment' else get_input_control_srr(sample)\n input_bam = get_bwa_bam(sample, control=False) if x == 'treatment' else get_bwa_bam(sample, control=True)\n output_bam = get_md_bam(sample, control=False) if x == 'treatment' else get_md_bam(sample, control=True)\n printp(\"\"\"picard -m 8g MarkDuplicates I={input_bam} O={output_bam} ASSUME_SORTED=true METRICS_FILE={srr}.markdup.metrics VALIDATION_STRINGENCY=LENIENT TMP_DIR=.; samtools index {output_bam}\"\"\".format(**locals()), timed=True)\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "def expect_duplicate(self):\n # Reset everything for this record\n self._expect_duplicate = False\n self.__dupcntr = 0\n self.__maxdup = 0\n # Get the probability to generate duplicate for next record\n if self.fake.random.random() < self.duplicate_cfg[\"Prob_duplicate\"]:\n self._expect_duplicate = True\n self.__maxdup = self.random_select_ndups()\n else:\n self._expect_duplicate = False\n self.__maxdup = 0\n\n self.__logger.debug(\"expect_duplicate ndups: %d\", self.__maxdup)", "def test_duplicates():\n\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"\"\" SELECT COUNT(CONCAT(song_title, ' ', artist_name)) \n FROM songs \"\"\")\n count1 = cur.fetchone()[0]\n cur.execute(\"\"\" SELECT COUNT(DISTINCT CONCAT(song_title, ' ', artist_name))\n FROM songs \"\"\")\n count2 = cur.fetchone()[0]\n assert count1-count2 == 0", "def PrintProgress(self):\n print ' Examined %d nodes, found %d unique...' 
% (\n self.nodes_examined, self.unique_nodes\n )", "def print_results(results):\n print(f\"Intial Entries: {results[0]}\")\n print(f\"Added Entries: {results[1]}\")\n print(f\"Final Entries: {results[2]}\")\n print(f\"Total Run Time: {results[3]}\")\n print(\"\\n\")", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def displayAudit():\n\tauditResults=runAudit(masterPod.currentMasterPod)\n\t#Get results and duplicates\n\tallResults=auditResults[\"ResultDict\"]\n\tduplicateResults=auditResults[\"DuplicateDict\"]\n\n\t#Display score\n\tauditScore=auditResults[\"Overall\"]\n\tauditScoreVar.set(str(auditScore)+\"%\")\n\n\tif auditScore >= 60:\n\t\tauditScoreLabel.update(fg=mainGreenColour)\n\telif auditScore >= 45:\n\t\tauditScoreLabel.update(fg=mainOrangeColour)\n\telse:\n\t\tauditScoreLabel.update(fg=mainRedColour)\n\n\t#Go through the results\n\tfor itemName in auditTable.rowInfo:\n\t\tif itemName in auditResults:\n\t\t\t#Update the label\n\t\t\tauditTable.updateRow(itemName,auditResults[itemName])\n\n\n\t#Update the buttons to they update on clicks\n\tfor rowText in auditTable.buttonInfo:\n\t\tif rowText == \"All accounts\":\n\t\t\tauditTable.updateButtonCommand(rowText,lambda: showAuditResults(allResults))\n\t\telif rowText == \"Strong Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Strong']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults: showAuditResults(s))\n\n\t\telif rowText == \"Average Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Medium']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults : showAuditResults(s))\n\n\t\telif rowText == \"Weak Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Weak']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults: showAuditResults(s))\n\n\t\telif rowText == \"Duplicates\":\n\t\t\tauditTable.updateButtonCommand(rowText,lambda: showAuditResults(duplicateResults))\n\n\t#Clear the tree\n\tauditResultsTree.delete(*auditResultsTree.get_children())", "def print_results(args):\n min_comics, filename = args\n with codecs.open(filename, 'a', 'utf-8') as fp:\n for name, shortname in sorted(load_result(json_file).items()):\n if name in exclude_comics:\n continue\n fp.write(u\"add(%r, %r)\\n\" % (\n str(truncate_name(name)), str(shortname))\n )", "def test_present_results_displays_results(self):\n # to test this we don't actually need to write to the database,\n # we just need a list of ordered_dicts in menu.records\n test_records = [\n OrderedDict([\n ('name', 'Test Employee 1'),\n ('date', datetime.date(2018, 5, 1)),\n ('task_name', 'Test Task 1'),\n ('duration', 1),\n ('notes', 'This is a note for the first test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 2'),\n ('date', datetime.date(2018, 5, 2)),\n ('task_name', 'Test Task 2'),\n ('duration', 2),\n ('notes', 'This is a note for the second test task')\n ]),\n ]\n self.menu.records = [test_records[0]]\n f_username = test_records[0]['name']\n f_date = test_records[0]['date'].strftime(\"%Y-%m-%d\")\n f_time_taken = str(test_records[0]['duration'])\n 
f_task_name = test_records[0]['task_name']\n f_notes = test_records[0]['notes']\n short_form = \"{}: {} ({}m): {} | {}\".format(\n f_username,\n f_date,\n f_time_taken,\n f_task_name,\n f_notes\n )\n expected_output = (\"\\nSearch Results\\n\" +\n \"1) {}\\n\".format(short_form) +\n \"\\n\" +\n \"Available actions:\\n\" +\n \"v) View detail\\n\" +\n \"e) Edit\\n\" +\n \"d) Delete\\n\" +\n \"m) go back to Main menu\\n\" +\n \"q) quit\\n\")\n\n '''The process for capturing `print()` statements and redirecting to\n an accumulating object for later processing has the following steps:\n 1. import io and sys\n 2. in the test function, create a StringIO object\n (this is a buffer object that will be the destination for the\n redirected stdout)\n ```\n captured_output = io.StringIO()\n ```\n 3. point stdout at the capture object\n ```\n sys.stdout = captured_output\n ```\n 4. Run code as normal, any print() statement will go to\n the StringIO object instead of standard out\n 5. Revert stdout (will not affect the contents of the StringIO buffer)\n ```\n sys.stdout = sys.__stdout__\n ```\n 6. Run the rest of the code. The contents of the StringIO buffer can\n be accessed as follows:\n ```\n captured_output.getvalue()\n ```\n '''\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n example_input = 'q'\n with patch('builtins.input', side_effect=example_input):\n self.menu.present_results()\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())", "def print_duplicates(non_uniques, ids, keys):\n for e in non_uniques:\n equiv_str = \"\"\n for id, key in zip(ids, keys):\n if id == e:\n equiv_str += key + \" <-> \"\n print(e + \" | \" + equiv_str[:-5])", "def showResult():\n logging.info('I am in showResult......')\n filename = \"../txt/%s_testall_%d_%d.txt\" % (const.DATASET_NAME,const.TOPIC_NUM,const.TOP_N)\n x = range(1,const.TOP_N,1)\n result = [[[] for i in range(5)] for i in range(const.METHOD_SIZE)]\n #read result from file to result\n if os.path.exists(filename):\n print '%s is existing......' 
% filename \n rFile = open(filename,\"r\")\n lines = rFile.readlines()\n for line in lines:\n line = line.rstrip('\\n')\n items = line.split(\"INFO:\")\n line = items[1]\n items = line.split(\":\")\n ids = items[0]\n values = items[1]\n idItems = ids.split(\">\")\n mid = int(idItems[0])\n topN = int(idItems[1])\n valueItems = values.split()\n result[mid][0].append(float(valueItems[0]))\n result[mid][1].append(float(valueItems[1]))\n result[mid][2].append(float(valueItems[2]))\n result[mid][3].append(float(valueItems[3]))\n result[mid][4].append(float(valueItems[4]))\n rFile.close()\n else:\n rFile = open(filename,\"w\")\n rFile.close()\n #if some method is not in file, recreate it\n for mid in range(const.METHOD_SIZE):\n if len(result[mid][0]) == 0:\n recalls,precisions,f1s,maes,rmses = getErrorOfRecMethod(mid)\n result[mid][0] = recalls\n result[mid][1] = precisions\n result[mid][2] = f1s\n result[mid][3] = maes\n result[mid][4] = rmses\n\n #plt img of comparing with pure method\n for index in range(5):\n plt.figure(index)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.SIMILAR,const.AVG]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms(Pure)\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/pure_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n\n #plt img of comparing with hybrid method\n for index in range(5):\n plt.figure(index+5)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.ARIMA_SIMILAR,const.ARIMA_AVG,const.ALL_HYBRID]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms(Hybrid)\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/hybrid_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n\n\n #plt img of comparing with sequential method\n for index in range(5):\n plt.figure(index+10)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.KNN,const.PATTERN,const.MARKOV,const.MARKOV_3]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Methods\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/seq_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n \n plt.figure(30)\n plt.plot(x,result[const.ARIMA_SIMILAR][3],'k-.',label=util.getMethodName(const.ARIMA_SIMILAR)) \n plt.plot(x,result[const.ARIMA_AVG][3],'k+',label=util.getMethodName(const.ARIMA_AVG)) \n 
plt.plot(x,result[const.ALL_HYBRID][3],'k',label=util.getMethodName(const.ALL_HYBRID)) \n plt.title(\"MAE of Hybrid Music Recommendation Methods\")\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(\"MAE\")\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/local_global_%s_%s_%d_%d.png\" % (const.DATASET_NAME,\"MAE\",const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(31)\n plt.plot(x,result[const.ARIMA_SIMILAR][4],'k-.',label=util.getMethodName(const.ARIMA_SIMILAR)) \n plt.plot(x,result[const.ARIMA_AVG][4],'k+',label=util.getMethodName(const.ARIMA_AVG)) \n plt.plot(x,result[const.ALL_HYBRID][4],'k',label=util.getMethodName(const.ALL_HYBRID)) \n plt.title(\"RMSE of Hybrid Music Recommendation Methods\")\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(\"RMSE\")\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/local_global_%s_%s_%d_%d.png\" % (const.DATASET_NAME,\"RMSE\",const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(19)\n improvement = []\n for i in range(len(result[const.ARIMA][1])):\n improvement.append((result[const.ARIMA][1][i]-result[const.KNN][1][i]) / result[const.KNN][1][i])\n plt.plot(x[10:],improvement[10:],'k',label='Improvement over UserKNN Recommender')\n plt.title('Average Precision Improvement over UserKNN Recommender')\n plt.xlabel('Number of recommendations')\n plt.ylabel('Improvement in Average Precision (times)')\n plt.legend()\n indexName = util.getIndexName(1)\n plt.savefig(\"../img/improvement_knn_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(20)\n improvement = []\n for i in range(len(result[const.ARIMA][1])):\n improvement.append((result[const.ARIMA][1][i]-result[const.KNN][1][i]) / result[const.KNN][1][i])\n plt.plot(x[10:],improvement[10:],'k',label='Improvement over UserKNN Recommender')\n plt.title('Average Precision Improvement over UserKNN Recommender')\n plt.xlabel('Number of recommendations')\n plt.ylabel('Improvement in Average Precision (times)')\n plt.legend()\n indexName = util.getIndexName(1)\n plt.savefig(\"../img/improvement_knn_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(21)\n improvement = []\n for i in range(len(result[const.ARIMA][2])):\n improvement.append((result[const.ARIMA][2][i]-result[const.KNN][2][i]) / result[const.KNN][2][i])\n plt.plot(x[10:],improvement[10:],'k',label='Improvement over UserKNN Recommender')\n plt.title('Average F1-Score Improvement over UserKNN Recommender')\n plt.xlabel('Number of recommendations')\n plt.ylabel('Improvement in Average F1-Score (times)')\n plt.legend()\n indexName = util.getIndexName(2)\n plt.savefig(\"../img/improvement_knn_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n logging.info('I am out showResult......')\n\n #plt img of comparing with pure method\n for index in range(5):\n plt.figure(index+50)\n indexName = util.getIndexName(index)\n print indexName\n mids = [const.ARIMA,const.SIMILAR,const.KNN,const.AVG,const.PATTERN,const.MARKOV]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker1[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker1[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/all_%s_%s_%d_%d.png\" % 
(const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n\n #plt img of comparing with hybrid method\n for index in range(5):\n plt.figure(index+75)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.ALL_HYBRID]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/hybrid_only_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()", "def display(self):\n print self.careErrors\n\n\n return self.exec_()", "def display_results_for_views(result):\n i = 0\n for r in result:\n print('\\t'+str(result[i][0]) + ' ---> '+str(result[i][1])+' views')\n i = i + 1", "def check():\n db.create_all()\n allEntries = Entry.query.all()\n return render_template('answers.html',result=allEntries)", "def report(self):\n print('total 1', len(self.videoids1))\n print('total 2', len(self.videoids2))\n print('total of repeats in_1', len(self.videoids_dict_repeats1))\n print('total of repeats in_2', len(self.videoids_dict_repeats2))\n print('total in_1_missing_in_2', len(self.in_1_missing_in_2))\n print('total in_2_missing_in_1', len(self.in_2_missing_in_1))", "def final_dup_check(cat):\n # Enforce chronological order\n cat.events.sort(key=lambda x: x.preferred_origin().time)\n dups = []\n others = []\n # Loop through and determine which of dups is detection and which is\n # template. Remove detection.\n for i, ev in enumerate(cat):\n if ev.preferred_origin().time - cat[i-1].preferred_origin().time < 2.:\n # Which is which\n if ev.creation_info.author == 'EQcorrscan':\n dups.append(ev)\n others.append(cat[i-1])\n print('Other event author: {}'.format(\n cat[i-1].creation_info.author))\n elif cat[i-1].creation_info.author == 'EQcorrscan':\n dups.append(cat[i-1])\n others.append(ev)\n print('Other event author: {}'.format(\n ev.creation_info.author))\n else:\n print('Neither')\n return dups, others", "def __printResults(files, expected, actual, similarity):\n if (showIndividualResults):\n for i in range(len(files)):\n print \"\\nExpected = %s\\nActual = %s \\nSimilarity = %f\" % (expected[i], actual[i], similarity[i])\n print \"\\nMean Similarity = %f\" % np.mean(similarity)", "def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))", "def report(self, results: dict):\n results = copy.deepcopy(results)\n results['status'] = 'done'\n results['time'] = str(dt.datetime.now())\n\n # insert or replace\n existing = self._query_by_dict(results)\n _id = None\n done_count = 0\n for item in existing:\n if item.get('status') == 'done':\n done_count += 1\n else:\n # Existing run, replace it\n _id = item['_id']\n\n for k in self._ignore_keys:\n if k in results:\n del results[k]\n\n # Existing one we overwrite it\n if _id:\n print(\"Replace: \", self._collect.replace_one({'_id': _id}, results, upsert=True).modified_count)\n else:\n # Else insert\n print(\"Inserted: \", self._collect.insert_one(results).inserted_id)\n\n # Check number we have finished\n if done_count:\n print(f\"[Warning Found {done_count} existing runs adding anyway]\")", "def test_duplicate_true_detections(self):\n expected_accuracy = 
dict(num_recall=10, uniq_recall=10, num_precision=20, uniq_precision=10)\n self._run_and_validate(self.duplicate_true_dets, self.ground_truths, expected_accuracy)", "def process_duplicate_rows(self):\n pass", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def show_results(self):\n print(\"Survey results:\")\n for response in self.responses:\n print('- ' + response)", "def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n print(\"-\"*32+'\\n')", "def print_multi_result(self, result: MultiResult, host: str) -> Panel:\n results = [\n self.print_result(r)\n for r in result\n if r.severity_level >= self.severity_level\n ]\n panel = Panel(\n Columns(results, **self.columns_settings),\n title=f\"{host} | {result.name}\",\n style=\"red\" if result.failed else \"green\",\n )\n return panel", "def print_aggregated_result(self, result: AggregatedResult) -> Panel:\n mulit_results = [\n self.print_multi_result(result=mulit_result, host=host)\n for host, mulit_result in result.items()\n ]\n columns = Columns(mulit_results, **self.columns_settings)\n panel = Panel(\n columns, title=result.name, style=\"red\" if result.failed else \"green\"\n )\n return panel", "def list_should_not_contain_duplicates(self,list_,msg=None):\r\n if not isinstance(list_,list):\r\n list_= list(list_)\r\n dupes = []\r\n for item in list_:\r\n if item not in dupes:\r\n count = list_.count(item)\r\n if count >1:\r\n logger.info(\" '%s' found %d times\" %(item,count))\r\n dupes.append(item)\r\n if dupes:\r\n raise AssertionError(msg or '%s found multiple times' %seq2str(dupes))", "def check_duplicates(filename=None):\r\n i=0\r\n for line in open(filename):\r\n a=line.split(\" \")\r\n i=i+1\r\n b=set(a[:])\r\n for item in b:\r\n if a.count(item) > 1:\r\n print \"line\",i,\"---->\",item\r\n print \"end\"", "def list_dups(exproot, **kwargs):\n seen_args = []\n seen_names = []\n for jobname, args, results in load_all(exproot):\n if args in seen_args:\n print jobname, 'is dup of', seen_names[seen_args.index(args)]\n elif args != None:\n seen_args.append(args)\n seen_names.append(jobname)", "def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). 
Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg", "def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg", "def print_results(results):\n data = []\n for idx in results.index:\n item = results.loc[idx]\n row = (str(item.gid), \n re.sub(r'\\n', ' ', item.creators),\n re.sub(r'[\\n\\r]+', ' ', item.title), \n gut_utf8.format(item.gid))\n data.append(row)\n _ = [print('|'.join(row)) for row in data]\n print('#', len(data), \"items returned\")\n return data", "def show_results(ninja_id):\n query = \"SELECT \" # get the ninja based on ninja id\n data = {} # ninja id\n mysql = # connect to mysql \n ninja = mysql.query_db(query, data)\n return render_template ('submitted_info.html', ninja = ninja) # [{ninja_data: \"stuff\"}]", "def generate_duplicate_pdf(self):\n num_dup = 1\n prob_sum = 0.0\n prob_list = [(num_dup, prob_sum)]\n max_dups = self.duplicate_cfg[\"Max_duplicate\"]\n uniform_val = 1.0 / float(max_dups)\n\n for i in range(max_dups - 1):\n num_dup += 1\n prob_list.append((num_dup, uniform_val + prob_list[-1][1]))\n return prob_list", "def test_duplicate(self):\n test_file = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-225-1-0.mdd')\n\n mdd.procall([test_file])\n \n self.compare_node58()", "def find_duplicates():\n return AppServer.service.find_duplicated_files()", "def _render_results_group_operations(self):\n\n core.add_text(\n name='Group operations',\n color=self._control_text_color,\n parent=self._window_name)\n\n core.add_button(\n 'Keep newest file, delete all other duplicates', \n callback=self._delete_all_duplicate_click_hander,\n callback_data=self._duplicates_list,\n parent=self._window_name) \n\n core.add_text('', parent=self._window_name)\n\n core.add_separator(parent=self._window_name)", "def test_check_sampleid_duplicates(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_sampleid_duplicates(header, mapping_data, errors)\r\n # Should not find duplicates\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s-1', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_sampleid_duplicates(header, mapping_data, errors)\r\n # Should find duplicates\r\n expected_errors = [\r\n 'Duplicate SampleID s-1 found.\\t1,0',\r\n 'Duplicate SampleID s-1 found.\\t2,0']\r\n\r\n self.assertEqual(errors, expected_errors)", "def _print_summary(results):\n if not len(results) > 0:\n print 'No results to 
show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")", "def showAllResults(self):\n \n names = self.jobstable.get_selectedRecordNames()\n if len(names)==1:\n ax,mh,x,y=self.showResults()\n\n else:\n tx=[]; ty=[]\n import pylab as plt\n f=plt.figure(figsize=(8,8))\n ax=f.add_subplot(111)\n for n in names:\n a,mh,x,y = self.showResults(n,showtable=False, ax=ax,stats=False)\n tx.extend(x)\n ty.extend(y) \n ax.legend()\n #add stats for summary\n from Correlation import CorrelationAnalyser\n C = CorrelationAnalyser() \n C.addStats(ax,tx,ty)\n f.show()\n \n return", "def print_results(results):\n print(\"\\033[4m\\033[1m%-75s%s\\033[0m\" % (\"NAME\", \"ADDRESS\"))\n\n for selections in data:\n print(\"%-75s%s\" % (selections['applicant'], selections['location']))\n \n print(\"\\n\\033[1m--- PAGE \", page_num, \"---\\033[0m\\n\")", "def printResults(self):\n for key in self.mDict.keys():\n print ('for {:d}, entries = {:d} and exits = {:d}'.format (key, self.mDict.get(key).get ('entries'), self.mDict.get(key).get ('exits')))", "def print_pair(self, record_pair):\n\n for pair_element in record_pair:\n for field in pair_element:\n line = \"%s : %s\" % (field, pair_element[field])\n print(line)\n print(\"\\n\")\n\n n_match = len(self.labeled_examples['match'])\n n_distinct = len(self.labeled_examples['distinct'])\n print(\"{0}/10 positive, {1}/10 negative\".format(n_match, n_distinct))\n\n print('Do these records refer to the same thing?')\n print('(y)es / (n)o / (u)nsure / (f)inished\\n')", "def print_result(self, header, result, suffix=\"views\"):\n print(header)\n for i in range(len(result)):\n print(' ', i + 1, '.', result[i][0], '--', result[i][1], suffix)\n print(\"\\n\")", "def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! <---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! 
<---\")\n\n self.chromosomes = chromosomes", "def test_create_unique_files(self):\n fitting_report.create(results=self.results,\n support_pages_dir=self.dir.name,\n options=self.options)\n\n file_names = sorted([r.fitting_report_link\n for r in self.results])\n\n unique_names = sorted(list(set(file_names)))\n\n self.assertListEqual(unique_names, file_names)", "def _results_debug_message(self):\n result = 'bisector.lkgr: %r\\n' % self.lkgr\n result += 'bisector.fkbr: %r\\n\\n' % self.fkbr\n result += self._revision_value_table()\n if (self.lkgr and self.lkgr.values and self.fkbr and self.fkbr.values):\n result += '\\n' + self._t_test_results()\n return result", "def report(self):\n print \"Got {} places from Wikipedia.\".format(len(self.places))\n print \"Got {} existing places.\".format(len(self.existing))\n print \"Found {} missing places:\".format(len(self.missing))\n print '\\n'.join(sorted(self.missing, key=lambda miss: miss[0]))\n print \"Found {} missing articles:\".format(len(self.missing_articles))\n print '\\n'.join(sorted(self.missing_articles, key=lambda miss: miss[0]))", "def handle_duplicates(self, database):\n number_of_duplicates = 0\n number_of_merged = 0\n if not database.session:\n logger.error(\"no database session\")\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout already has been checked\n if self.is_duplicate_with or self.manual_check_required_with:\n logger.debug(\"dup check - no check, since this workout is marked: {}\".format(self))\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout does not have start_time set, since the following checks are based on it\n if not self.start_time or not self.duration_sec:\n return (number_of_duplicates, number_of_merged)\n\n # potential duplicate if time is overlapping\n # this workout |-----------------|\n # 1st potential duplicate in db |-----------------|\n # 2nd potential duplicate in db |------------------------|\n # 3rd potential duplicate in db |----------------|\n # 4th potential duplicate in db |---------|\n # (Remark to line 2 of 1st filter: needed to use database functions, \n # because modifiers like timedelta do not work with sqlalchemy sql attributes)\n # TODO handle timezones (needed for sqlite strftime)\n duplicates = database.session.query(Workout)\\\n .filter(or_(and_(Workout.start_time < self.start_time,\n func.strftime('%s', Workout.start_time, 'utc') + Workout.duration_sec >= self.start_time.timestamp()),\n and_(Workout.start_time >= self.start_time,\n Workout.start_time < (self.start_time + datetime.timedelta(seconds=int(self.duration_sec))))))\\\n .filter(Workout.is_duplicate_with == None)\\\n .filter(Workout.manual_check_required_with == None)\\\n .all()\n\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of different sports -> set manual_check_required_with\n for duplicate in duplicates:\n if duplicate.sport_id != self.sport_id:\n self.manual_check_required_with = duplicate.id\n logger.debug(\"dup check - workout marked to be checked: {}\".format(duplicate))\n duplicates.remove(duplicate)\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of same sports (they are duplicate workouts) -> now find the leading workout\n leading_workout = None\n # Step 1: if one of the duplicates is a previously merged one, use it as the leading workout\n for duplicate in duplicates:\n if duplicate.source and duplicate.source == \"MERGED WORKOUT\":\n 
leading_workout = duplicate\n logger.debug(\"Found leading workout in step 1: {}\".format(leading_workout))\n break\n # Step 2: else if one of the duplicates is from Zwift, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.name and \"Zwift\" in duplicate.name:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 2: {}\".format(leading_workout))\n break\n # Step 3: else if one of the duplicates is a Garmin import, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.source and \"Garmin\" in duplicate.source:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 3: {}\".format(leading_workout))\n break\n # Step 4: else use this workout as the leading workout\n if not leading_workout:\n leading_workout = self\n logger.debug(\"Found leading workout in step 4: {}\".format(leading_workout))\n\n # create a new workout that will be treated as the leading one. Mark the duplicates \n if leading_workout.source == \"MERGED WORKOUT\":\n merged_workout = leading_workout\n else:\n merged_workout = Workout(source=\"MERGED WORKOUT\", external_id=datetime.datetime.now().timestamp())\n number_of_merged += 1\n merged_workout._merge_attributes(leading_workout)\n logger.debug(\"dup check - merged workout with leading: {}\".format(merged_workout))\n merged_workout.add(database)\n leading_workout.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n\n for duplicate in duplicates:\n if duplicate is leading_workout:\n # already merged above\n continue\n if duplicate.is_duplicate_with == merged_workout.id:\n # already merged\n continue\n merged_workout._merge_attributes(duplicate)\n logger.debug(\"dup check - merged workout duplicate: {}\".format(merged_workout))\n duplicate.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n logger.debug(\"dup check - duplicate workout marked: {}\".format(duplicate))\n\n return (number_of_duplicates, number_of_merged)", "def unique_ssh_results(results):\n r = {}\n for k in results:\n r[results[k][0]] = True\n return r.keys()", "def generate_clarification(self, response, ntuple):\n for (cname, clarification) in self.clarification_templates.items():\n condition = clarification['condition']\n is_valid = True\n for (key, value) in condition.items():\n if self.data.get_data(ntuple['sid'], key) != value:\n # print(\"User is {} and condition is {}\".format(\n # self.data.get_data(ntuple['sid'], key), value))\n is_valid = False\n break\n if is_valid:\n response['clarification'] = clarification\n return", "def _get_non_unique_error(self, queryset):\n non_unique = {}\n for field in self._invoice_report_common_fields:\n items = queryset.values(field).distinct()\n if items.count() != 1:\n if field == 'invoice_date':\n data = ', '.join(\n item[field].strftime(\"%Y-%m-%d\")\n for item in items if item[field]\n )\n else:\n data = ', '.join(\n item[field] for item in items if item[field]\n )\n non_unique[field] = data\n non_unique_items = ' '.join([\n '{}: {}'.format(key, value)\n for key, value in non_unique.items() if value\n ])\n return '{}: {}'.format(\n _('Selected items have different'), non_unique_items,\n )", "def output_test():\n\toutput_comparison_page(TEST_EVENT_LIST, TEST_COMPARISON_PAGE_FILEPATH)", "def _draw_duplicates_set(self, duplicate_images_list: FileMetaDataList) -> None:\n \n try:\n\n file_path = duplicate_images_list[0].GetPath()\n\n core.add_drawing(\n file_path, \n width=100, \n height=100, \n 
parent=self._window_name)\n\n core.draw_image(\n file_path, \n file_path, \n [0, 0], \n pmax=[100, 100],\n uv_min=[0, 0], \n uv_max=[1, 1], \n tag=\"image\")\n\n for file in duplicate_images_list:\n \n file_path = file.GetPath()\n\n core.add_button(\n 'Delete ##'+file_path, \n callback=self._delete_file_button_click_handler, \n callback_data=file,\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n file_path, \n parent=self._window_name)\n\n core.add_separator(parent=self._window_name)\n\n except Exception as e:\n core.log_error('View::_draw_duplicates_set - Exception : [%s]' % (e))", "def listDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[~ok]", "def result_printer(self):\n for i in self.output:\n for item, value in i.items():\n if not isinstance(value, list) and \"http://\" not in value:\n print(f\"{item} : {value}\")\n print(20 * '-')", "def seq_dup_levels_plot (self):\n if 'seq_dup_levels' not in self.fastqc_data or len(self.fastqc_data['seq_dup_levels']) == 0:\n log.debug('seq_dup_levels not found in FastQC reports')\n return None\n \n pconfig = {\n 'id': 'fastqc_seq_dup_levels_plot',\n 'title': 'Sequence Duplication Levels',\n 'categories': True,\n 'ylab': '% of Library',\n 'xlab': 'Sequence Duplication Level',\n 'ymax': 100,\n 'ymin': 0,\n 'yMinTickInterval': 0.1,\n 'colors': self.get_status_cols('seq_dup_levels'),\n 'tt_label': '<b>{point.x}</b>: {point.y:.1f}%',\n }\n \n self.sections.append({\n 'name': 'Sequence Duplication Levels',\n 'anchor': 'fastqc_seq_dup_levels',\n 'content': '<p>The relative level of duplication found for every sequence. ' +\n 'See the <a href=\"http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/8%20Duplicate%20Sequences.html\" target=\"_bkank\">FastQC help</a>.</p>' +\n self.plot_xy_data(self.fastqc_data['seq_dup_levels'], pconfig)\n })", "def display_results(summary):\n print ('Total running time %.2f secs (includes DB checks)'\n % summary.total_time)\n\n print 'OK:', summary.ok\n print 'Errors:', summary.errors\n\n # Display stats\n print 'Changes stats:'\n for var, s in summary.stats.iteritems():\n print '\\t%s:' % var,\n for x in s.iteritems():\n print '%s=%.2f' % x,\n print\n\n # Display profiling data\n print 'Profiling data:'\n for name, data in summary.profile.iteritems():\n print '\\t%s: %d calls, %.2fms' % (name, data['callcount'],\n data['time'] * 1000)", "def display_nornir_results(nr_result):\n print(menu('Interface Update'))\n for host in nr_result:\n if not nr_result[host].failed:\n if nr_result[host].changed:\n print(Fore.YELLOW + f'{host}: True')\n else:\n print(Fore.GREEN + f'{host}: False')\n else:\n print(Fore.RED + f'{host}: FAILED')", "def final_result(self):\r\n print(\" Game \\t\\t Word \\t\\t Result \\t\\t Bad Guess \\t\\t Missed Letters \\t\\t Score \")\r\n print(\" ---- \\t\\t ---- \\t\\t ------ \\t\\t --------- \\t\\t -------------- \\t\\t ----- \")\r\n count = 0\r\n final_score = 0\r\n for x in self.instances:\r\n count += 1\r\n print(\" \"+str(count)+\" \\t\\t \"+str(x.get_word())+\" \\t\\t \"+str(x.get_result())+\" \\t\\t \"+str(x.get_wrong_guess())+\" \\t\\t\\t \"+str(x.get_wrong_letter())+\" \\t\\t\\t \"+str(round(x.get_score(),3)))\r\n final_score += x.get_score()\r\n\r\n print(\"\\nFinal Score : \"+str(round(final_score,3)))", "def report():\n\n global COUNTER\n \n if COUNTER > 0:\n print('\\n\\n')\n\n print('Searched {0} files'.format(SEARCHED))\n print('Found {0} TODOs in {1} 
files'.format(COUNTER, F_COUNTER))", "def check_duplicate(self, state):\n pass", "def show_item(self, folder, index, is_same):\n path_1, path_2, _, _, _ = self.pairs[index]\n img1 = cv.imread(path_1)\n img2 = cv.imread(path_2)\n text = 'same'\n if not is_same:\n text = 'diff'\n\n if self.use_landmarks:\n landmarks1 = np.array(self.landmarks[path_1[path_1.rfind('/')+1:]]).reshape(-1)\n landmarks2 = np.array(self.landmarks[path_2[path_2.rfind('/')+1:]]).reshape(-1)\n img1 = FivePointsAligner.align(img1, landmarks1)\n img2 = FivePointsAligner.align(img2, landmarks2)\n else:\n img1 = cv.resize(img1, (400, 400))\n img2 = cv.resize(img2, (400, 400))\n cv.imwrite(osp.join(folder, 'misclassified_{}_{}.jpg'.format(text, index)), np.hstack([img1, img2]))", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def test_identify_duplicates_1(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"L5\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def results(self, checkid):\r\n return results.Results(self, checkid)", "def make_comparison_plot(args, res, keys, min_length):\n directory = args.directory\n\n # Build the plot.\n fig, ax = plt.subplots(figsize=(args.figSizeX, args.figSizeY))\n\n # Stack the results groups, thus, each must be the same shape.\n sns.tsplot(data = np.stack(res, axis=2), condition=keys, ax=ax, ci=[68, 95])\n \n # Save the plot.\n ax.set_title('Average Return by Group, N=' + str(min_length), fontsize=18)\n ax.set_xlabel('Bin', fontsize=18)\n ax.set_ylabel('Average Return', fontsize=18)\n ax.legend(fontsize=18)\n plt.tick_params(axis='both', which='major', labelsize=18)\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n plt.savefig(os.path.join(directory, 'group_comparison.png'), \n bbox_inches='tight')", "def print_analysis(self,version,results,tests,test_details,test_order,\n output_dir,diffs_only):\n def format_result(r):\n return '%s %s' % (r.outcome,r.get_cause())\n\n main_template = makolookup.get_template(\"main.mako\")\n detail_template = makolookup.get_template(\"detail.mako\")\n\n f = open(os.path.join(output_dir,'index.html'),'w')\n try:\n f.write(main_template.render(version=version,results=results,tests=tests,\n test_details=test_details,test_order=test_order,\n time2datetime=time2datetime))\n finally:\n f.close()\n\n for test_id,test_detail in test_details.items():\n #print ('Detail: %s' % test_id)\n f = open(os.path.join(output_dir,test_id+'.html'),'w')\n try:\n f.write(detail_template.render(version=version,test_id=test_id,\n test_detail=test_detail,diffs_only=diffs_only))\n except:\n f.write(\"Error while processing output.\")\n finally:\n f.close()", "def _display_related_alerts(self):\n if self.check_valid_result_data(\"related_alerts\", silent=True):\n display(self._last_result.related_alerts)\n else:\n nb_markdown(f\"No Alerts related to {self.url}\")", "def print_results(self, data: SimData) -> None:\n pass", "def show_msg(self):\n if self.result and self.success_msg:\n print color_str('g', '\\n'.join(self.success_msg))\n elif self.result == 
False and self.fail_msg:\n print color_str('r', '\\n'.join(self.fail_msg))\n if self.stat_msg:\n print color_str('b', '\\n'.join(self.stat_msg))" ]
[ "0.6287719", "0.5866031", "0.58241785", "0.57221144", "0.56531405", "0.56339365", "0.56136125", "0.5593977", "0.55925065", "0.55140984", "0.5512862", "0.5487937", "0.5483856", "0.5471412", "0.5454186", "0.53893715", "0.5378345", "0.53769517", "0.5371023", "0.5360018", "0.5356362", "0.5355224", "0.5293162", "0.52846473", "0.5282217", "0.5282178", "0.524647", "0.52459943", "0.52421564", "0.5233009", "0.52311325", "0.522325", "0.5198191", "0.5195463", "0.5189969", "0.518483", "0.51836777", "0.5172754", "0.51676506", "0.5166553", "0.5164758", "0.51577455", "0.51492846", "0.51449966", "0.51441103", "0.5142286", "0.5140707", "0.51317674", "0.51292264", "0.512607", "0.51177925", "0.51122713", "0.5105811", "0.5105796", "0.5104688", "0.51022714", "0.50984514", "0.50969195", "0.50969195", "0.50952876", "0.50936645", "0.5092719", "0.5089113", "0.50874233", "0.50862277", "0.5072129", "0.50639623", "0.5055669", "0.5052176", "0.50481117", "0.5043046", "0.5030147", "0.50260216", "0.5023378", "0.50192916", "0.50176984", "0.50149035", "0.5010011", "0.50067", "0.499802", "0.49957073", "0.49945125", "0.49892482", "0.49847478", "0.4980964", "0.49805123", "0.4976242", "0.49760652", "0.497585", "0.49679163", "0.49669454", "0.49653667", "0.49653667", "0.49603713", "0.4960091", "0.49561766", "0.49537173", "0.49530825", "0.49472958", "0.49451676" ]
0.67122084
0
Load a CSV file into a dict with the first column as the key.
def load_csv_to_dict(filename): row_len = list() result = dict() with open(filename, 'r') as csvfile: reader = csv.reader(csvfile) for row in reader: key = row[0].strip() values = [v.strip() for v in row[1:]] result[key] = values row_len.append(len(values)) return result, max(row_len)
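For illustration, a minimal, self-contained usage sketch of the load_csv_to_dict helper shown above; the file name example.csv and its two sample rows are hypothetical, chosen only to show the expected return shape (a dict keyed by the first column, plus the widest row length).

import csv

def load_csv_to_dict(filename):
    # Same helper as in the document above, repeated here so the sketch runs on its own.
    row_len = list()
    result = dict()
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            key = row[0].strip()
            values = [v.strip() for v in row[1:]]
            result[key] = values
            row_len.append(len(values))
    return result, max(row_len)

if __name__ == '__main__':
    # Hypothetical input: first column is the key, remaining columns become the value list.
    with open('example.csv', 'w', newline='') as f:
        f.write('alpha, 1, 2, 3\nbeta, 4, 5\n')
    table, widest = load_csv_to_dict('example.csv')
    print(table)   # {'alpha': ['1', '2', '3'], 'beta': ['4', '5']}
    print(widest)  # 3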
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_to_dict(filename):\n\twith open(filename, 'r') as in_hndl:\n\t\tindict = [i for i in csv.DictReader(in_hndl)]\n\treturn indict[0]", "def save_csv_into_dictionary(csv_file):\n\n dictionary = OrderedDict()\n with open(csv_file, newline='') as file:\n reader = csv.reader(file)\n for row in reader:\n dictionary[row[0]] = row[1]\n return dictionary", "def load_csv(file):\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile)\n return [dict(row) for row in reader]", "def creating_dict_from_csv(self) -> dict:\n dictionary = {}\n for row in self.__read_csv():\n if dictionary.get(row[0]):\n dictionary[row[0]].append((row[1], row[2]))\n else:\n dictionary[row[0]] = [(row[1], row[2])]\n\n for key, value in dictionary.items():\n dictionary[key] = sorted(value, key=lambda x: x[1], reverse=True)\n\n return dictionary", "def csvToDict(filepath):\n data = []\n with open(getcwd() + filepath, 'r') as dataset:\n assert csv.Sniffer().has_header(dataset.read(9999)), 'No headers'\n dataset.seek(0)\n dialect = csv.Sniffer().sniff(dataset.read(99999))\n dataset.seek(0)\n reader = csv.DictReader(dataset, dialect=dialect)\n headers = reader.fieldnames\n for row in reader:\n data.append(row)\n\n data = assert_data_format(data)[0]\n\n return data, headers", "def csv2dicts(csvfile, names=None):\n data = []\n for row_index, row in enumerate(csvfile):\n if row_index == 0:\n if names:\n keys = names\n else:\n keys = row\n print(keys)\n continue\n data.append({key: value for key, value in zip(keys, row)})\n return data", "def csv_to_dict(fp):\n import pandas as pd\n df = pd.read_csv(fp, index_col=0, header=None)\n d = df.to_dict(orient='index')\n d = {k: v.values() for k, v in d.iteritems()}\n return d", "def dict_from_file(path, key='id', dialect='excel-tab'):\n if not os.path.exists(path):\n raise ValueError(\"File not found: {}\".format(path))\n reader = csv.DictReader(open(path), dialect=dialect)\n return dict([(x[key], x) for x in reader])", "def dictparse(csvfilename, keyfield):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True)\n for row in csvreader:\n table[row[keyfield]] = row\n return table", "def csv_to_dict(filename):\n data_list = []\n \n with open(filename, 'rb') as datafile:\n data_reader = csv.DictReader(datafile, delimiter = ',')\n for row in data_reader:\n data_list.append(row)\n\n return data_list", "def load_csv(input):\n with open(input['csv'], 'r', encoding=input['encoding']) as f:\n invoice_dict = dict()\n reader = csv.reader(f, delimiter=';')\n\n for row in reader:\n invoice_id = row[0]\n\n if invoice_id in invoice_dict:\n invoice_dict[invoice_id].add_entry(row[1:])\n else:\n invoice_dict[invoice_id] = Invoice(row)\n\n return invoice_dict", "def read_csv_as_dicts(csv_input_file_name):\n input_table = read_csv_as_table(csv_input_file_name, skip_first_line=False)\n\n # first line should contain headers\n header = input_table[0]\n # rest lines would contain actual data\n data = input_table[1:]\n\n output = []\n # process all lines with data\n for input_line in data:\n record = {}\n for i in range(len(header)):\n record[header[i]] = input_line[i]\n output.append(record)\n return output", "def _csv_to_dict(name):\n csv_path = _get_csv_path(name)\n result = []\n with open(csv_path) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n result.append(row)\n return result", "def dictparse(csvfilename, keyfield, separator, quote, quotestrategy):\n table = {}\n with open(csvfilename, \"rt\", 
newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True,\n delimiter=separator,\n quotechar=quote,\n quoting=quotestrategy)\n for row in csvreader:\n table[row[keyfield]] = row\n return table, csvreader.fieldnames", "def dictparse(csvfilename, keyfield, separator, quote, quotestrategy):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True,\n delimiter=separator,\n quotechar=quote,\n quoting=quotestrategy)\n for row in csvreader:\n table[row[keyfield]] = row\n return table, csvreader.fieldnames", "def csv_to_dict(csvfile, delimiter=\",\", quotechar='\"'):\n reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)\n\n data = {each: [] for each in reader.fieldnames}\n for i, row in enumerate(reader):\n for key, value in row.items():\n data[key].append(value)\n return data", "def get_dictionary_from_csv(file):\n csv_file = file[:-4] # avoid .txt extension\n csv_file += \"_dico.csv\"\n dic = pd.read_csv(csv_file, delimiter=',')\n return list(dic.columns)", "def read_name_map( name_map_path) :\n with open( name_map_path, newline=\"\") as csvfile:\n table = { }\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) < 2:\n continue\n if row[key_col] == key_header:\n continue\n key = row[key_col]\n val = row[val_col]\n table[key] = val\n return table", "def read_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n dictionaryoutput[item[0]] = item[1]\r\n return dictionaryoutput", "def csv_to_dict(self):\n log = logger.configure(\"default\")\n try: \n df = pd.read_csv(self.__csv_path)\n except IOError as e:\n # file not found\n log.error('Could not import {}. 
Got error {}'.format(self.__csv_path, e))\n raise \n else:\n cols = list(df.columns)\n metafield_cols = [col for col in cols if 'metafields' in col]\n if metafield_cols == [] or 'Handle' not in cols:\n # relevant columns don't exist\n log.error('{} does not contain `Handle` or `metafields` named columns'.format(self.__csv_path))\n raise\n else:\n new_cols = ['Handle'] + metafield_cols\n df = df[new_cols].set_index('Handle')\n df = df[~df.index.duplicated(keep='first')]\n return df.to_dict('index')", "def parse_csv_input_file(input_file):\n with open(input_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n dict = {i: x for i, x in item.items()}\n yield(dict)", "def read_into_dictionary(input_file):\n logger.debug(\"%s %s (%s)...\" % (LOG_INDENT, inspect.stack()[0][3], input_file))\n\n input_file_suffix = (pathlib.Path(input_file).suffix)\n ret_dict = {}\n if input_file_suffix == '.csv':\n logger.debug(\"%s opening file [%s]\" % (LOG_INDENT,input_file))\n reader = csv.reader(open(input_file, 'r'))\n for row in reader:\n # read in and strip of comments / blank lines etc..\n variable_name = row[0].strip()\n variable_value = row[1].strip()\n if not variable_name:\n continue\n if variable_name.startswith('#') or variable_value.startswith('#'):\n continue\n logger.debug(\"%s %s=%s\" % (LOG_INDENT,variable_name,variable_value))\n # save in dictionary\n ret_dict[variable_name] = variable_value\n return ret_dict", "def readcsv(csvfile):\n logger = log.getLogger('obslog.readcsv')\n\n if not os.path.exists(csvfile):\n logger.error('Cannot access %s', csvfile)\n raise SystemExit\n\n data = {}\n with open(csvfile, mode='r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n data[row['FITSFILE']] = row\n logger.debug('Data: %s', data)\n return data", "def load(filename):\n with open(filename,'r') as fd:\n csv_in = csv.reader(fd, delimiter=',', quotechar='\"')\n keys = csv_in.next()\n data = {k:[] for k in keys}\n for row in csv_in:\n for k,v in zip(keys,row):\n data[k].append(float(v))\n return data", "def read_file(filename):\n\n data = {}\n with open(filename, encoding=\"utf8\") as file:\n reader = csv.DictReader(file)\n for line in reader:\n data[line['id']] = line\n return data", "def csv2dict(filename):\n dis_dict = {}\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n el_a = row[\"Element Name\"]\n dis_dict[el_a] = {}\n for entry in row:\n if entry != \"Element Name\":\n dis_dict[el_a][entry] = float(row[entry])\n csvfile.close()\n return dis_dict", "def import_data(fname, rowsToRead):\n with open(filepath, 'r') as f:\n reader = csv.reader(f, delimiter=\",\")\n headers = next(reader)[1:]\n data_dict = defaultdict(list)\n for row in islice(reader, rowsToRead):\n for index, key in enumerate(headers):\n data_dict[key].append(row[index + 1])\n return data_dict", "def load_csv(file):\n import csv\n reader = csv.reader(open(file, 'r'))\n columns = reader.next()\n c2i = dict((columns[i], i) for i in range(len(columns)))\n data = {}\n excluded = set([REP_CSV_HED_TIM, REP_CSV_HED_HER])\n for row in reader:\n \n # get relevant info from the line\n time = float(row[c2i[REP_CSV_HED_TIM]])\n hero = row[c2i[REP_CSV_HED_HER]]\n other = dict((c, REP_CSV_HANDLERS.get(c, REP_CSV_DEFHANDLER)(row[c2i[c]])) for c in columns if c not in excluded)\n \n # add to the data dictionary\n if hero not in data: data[hero] = []\n data[hero].append([time] + [other])\n \n return data", "def csv_dict_reader(file_path):\r\n with open(file_path, 
'r') as file_obj:\r\n\r\n reader = csv.DictReader(file_obj, delimiter=',')\r\n for line in reader:\r\n #print(line[\"variable_name \"]),\r\n print(line[\"dataset\"])", "def readCSV(filename, separator):\n \n filetoread = open(filename, \"r\")\n lines = []\n for line in filetoread:\n line = line.replace(\"\\n\", \"\").split(separator)\n lines.append(line)\n keys, values = lines[0], lines[1]\n dictionary = {}\n for i in range(0,len(keys)):\n try:\n dictionary[keys[i]] = int(values[i])\n except:\n dictionary[keys[i]] = values[i]\n return dictionary", "def load_csv(filepath):\n log.debug('Loading csv')\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n return reader.fieldnames, list(reader)", "def read_reverse_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n dictionaryoutput[item[1]] = item[0]\r\n return dictionaryoutput", "def load_data(filename):\n data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n name = row[\"name\"]\n data[name] = {\n \"name\": name,\n \"mother\": row[\"mother\"] or None,\n \"father\": row[\"father\"] or None,\n \"trait\": (True if row[\"trait\"] == \"1\" else\n False if row[\"trait\"] == \"0\" else None)\n }\n return data", "def load_movies(filepath):\n movies = dict()\n with open(filepath, 'rb') as file:\n reader = csv.reader(file, delimiter=',')\n next(reader, None) # Skip header\n for row in reader:\n movie_id, title, _ = row\n movies[int(movie_id)] = {\"id\": int(movie_id), \"title\": title}\n\n return movies", "def read_csv(path: str) -> list[dict[str, str]]:\n with open(path, 'r') as f:\n return list(csv.DictReader(f))", "def _read_csv_to_dictionary_list(file_name):\n catalog_list = []\n with open(file_name) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n catalog_list.append(item)\n return catalog_list", "def load_data(filename):\n data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n name = row[\"name\"]\n data[name] = {\n \"name\": name,\n \"mother\": row[\"mother\"],\n \"father\": row[\"father\"],\n \"trait\": (True if row[\"trait\"] == \"1\" else\n False if row[\"trait\"] == \"0\" else None)\n }\n return data", "def read_file(file):\n \n dictionary = {}\n csv_fp = csv.reader(file)\n #L[46] = manufacturer, L[63] = year\n #L[4]= city mileage, L[34]=highway mileage\n for line in csv_fp:\n #Skip the headings and the year 2017\n if (not (line[46] == 'make')) and (not (line[63] == '2017')):\n if line[46] in dictionary:\n #Add the city and highway mileage if the year has been made\n if line[63] in dictionary[line[46]]:\n dictionary[line[46]][line[63]][0] += [int(line[4])]\n dictionary[line[46]][line[63]][1] += [int(line[34])]\n #Add the year and data if it was not made previously\n else:\n dictionary[line[46]][line[63]] = [[int(line[4])],\\\n [int(line[34])]]\n #Adds a new manufacturer\n else:\n dictionary[line[46]] = {line[63]:[[int(line[4])],\\\n [int(line[34])]]}\n return dictionary", "def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row", "def _convert_csv_column_to_dict(csv_data, column):\n results = dict()\n\n for row in csv_data:\n key = row[0]\n data = row[1:][column]\n\n if data:\n if key not in results:\n results[key] = data.strip() if data else \"\"\n else:\n # append multiple choice questions\n results[key] += \"|{0}\".format(data.strip())\n\n return results", "def 
parse_trick_ascii(csv_file):\n data_file = csv.DictReader(open(csv_file))\n single_run_data_dict = {'altitude' : [0.0],\n 'latitude' : [0.0],\n 'longitude' : [0.0]}\n # Your code here\n # ...\n # return the dict\n return single_run_data_dict", "def load_dict_file(file):\n with open(file, 'r', newline='') as csv_reader:\n return json.load(csv_reader)", "def csv_dict_reader(file_obj):\n #import re\n #file = open(file_obj)\n\n # reader = csv.DictReader(file_obj)\n # for line in reader:\n # print(line[\"Name\"])", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def read_names_into_dict():\n d = dict()\n with open(\"SP_500_firms.csv\") as csvfile:\n input_file = csv.DictReader(csvfile)\n for row in input_file:\n #print(row)\n d[row['Symbol']] = [row['Name'],row['Sector']]\n return d", "def load_from_csv(self, path_to_csv_file):\n\n with open(path_to_csv_file, 'r') as file:\n reader = DictReader(file)\n for row in reader:\n item = {key: value for key, value in row.items()}\n item['id'] = int(item['id'])\n self.dump_db.append(item)", "def loadCSV(input_file):", "def load_from_file_csv(cls):\n fields = []\n rows = []\n new_dict = {}\n new_list = []\n key = \"\"\n filename = cls.__name__ + \".csv\"\n with open(filename) as fp:\n reader = csv.reader(fp)\n fields = next(reader)\n for row in reader:\n rows.append(row)\n for row in rows:\n i = 0\n new_dict = new_dict.fromkeys(fields)\n for attr in fields:\n key = fields[i]\n value = row[i]\n new_dict[key] = value\n i += 1\n new_list.append(cls.create(**new_dict))\n return new_list", "def load_csv(filename):\n results = defaultdict(list)\n with open(filename, 'r') as f:\n reader = csv.DictReader(f)\n for line in reader:\n results[line['sid']].append(line)\n return results", "def open_client_file_to_dict():\n clients_dict = []\n file = open(r'../clientmailerproj/client.csv', encoding='utf-8-sig')\n client_ordered_dict = csv.DictReader(file)\n for row in client_ordered_dict:\n clients_dict.append({\n 'First': row['First Name'],\n 'Last': row['Last Name'],\n 'Company': row['Account Name'],\n 'Email': row['Email'],\n 'Job': row['Job']\n })\n return clients_dict", "def csv_to_dict_list(file_path, char_sep=\"|\"):\n with open(file_path, mode='r') as f:\n d = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True, delimiter=char_sep)]\n return d", "def read_file(filepath: str) -> dict:\n if not filepath.endswith(\".csv\"):\n raise RuntimeError(\"File extension must be .csv\")\n\n people = {}\n with open(filepath) as csv:\n for line in csv:\n email, person = Parser.parse_line(line.rstrip(\"\\n\"))\n if email not in people:\n people[email] = person\n else:\n print(\"Ignoring person with duplicate email {}\".format(email))\n return people", "def _process_csv_data(csv_file, user_data_map):\n with open(csv_file, 'r') as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n if len(row) < 2:\n print('The CSV file is not in expected format.')\n raise Exception\n user_data_map[row[1].lower()] = row[0]", "def open_csv(file, dct):\n with open(file) as csv_file:\n f_csv = csv.reader(csv_file)\n 
column_headings = next(f_csv)\n csv_row = namedtuple('Row', column_headings)\n for rows in f_csv:\n row = csv_row(*rows)\n dct[row.term] = row.definition", "def read_sailor_data(filename):\n\td=OrderedDict()\n\twith open(filename) as csvfile:\n\t\trdr = csv.reader(csvfile)\t\n\t\tfor i in rdr:\n\t\t\t#This except is so that if the line trying to be inputted into the dictionary is a string\n\t\t\t#It will ignore it and go to the next line\n\t\t\ttry: d[i[0]]=(float(i[1]),float(i[2]))\n\t\t\texcept: None\n\treturn d", "def load_data(self):\n df = pandas.read_csv(self.path)\n self.data_dict = df.to_dict(orient=\"list\")\n return self.data_dict", "def _load_dict(infile):\n\n # read the data into a list\n data = []\n\n # open the file\n f = open(infile)\n\n for line in f:\n # ignore hashed lines\n if not line.startswith('#') and not line.startswith('@'):\n\n # mind to strip newlines\n data.append(line.strip('\\n\\r').split('\\t'))\n \n # create the dictionary in which the data will be stored\n d = {}\n\n # check for first line, if a local ID is given in the header (or simply\n # \"ID\"), take this line as the ID, otherwise create it\n if data[0][0].lower() in ['local_id','localid']:\n local_id = True\n else:\n local_id = False\n\n # iterate over data and fill the dictionary (a bit inefficient, but enough\n # for the moment)\n i = 1\n for line in data[1:]:\n if local_id:\n d[int(line[0])] = line[1:]\n else:\n d[i] = line\n i += 1\n\n # assign the header to d[0]\n if local_id:\n d[0] = [x.lower() for x in data[0][1:]]\n else:\n d[0] = [x.lower() for x in data[0]]\n\n # return the stuff\n return d", "def getCSVData(filename, key):\n cr = csv.DictReader(open(filename,'r'))\n data = {}\n order = []\n for r in cr:\n k = r[key]\n data[k] = r\n order.append(k)\n fields = cr.fieldnames\n return data, order", "def read_csv_label(csv_file):\n labels = {}\n with open(csv_file) as f:\n reader = csv.reader(f)\n next(reader)\n for row in reader:\n try:\n labels[row[0]] = int(row[1])\n except:\n labels[row[0]] = 999\n return labels", "def read_strong_csv(strong_meta_csv_path):\n with open(strong_meta_csv_path, 'r') as fr:\n reader = csv.reader(fr, delimiter='\\t')\n lines = list(reader)\n \n meta_dict = {}\n for line in lines:\n [audio_name, begin_time, end_time, label] = line\n meta = {'begin_time': begin_time, 'end_time': end_time, 'label': label}\n if audio_name in meta_dict:\n meta_dict[audio_name].append(meta)\n else:\n meta_dict[audio_name] = [meta]\n \n return meta_dict", "def load_csv(path: Path) -> Any:\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n items = list(reader)\n return items", "def cart_from_csv(csv_file_path):\n prices = {}\n with open(csv_file_path) as csvfile:\n for i, row in enumerate(csv.reader(csvfile, delimiter=',')):\n if len(row) != 2:\n raise MalformedCSV('Each CSV row should contain exactly 2'\n ' rows, not %s. 
-> name,price')\n prices[utf8(row[0])] = float(row[1])\n return Cart(prices)", "def load_state_id_mapping():\n\n MAPPING_CSV = \"./locations_state.csv\"\n with open(MAPPING_CSV) as f:\n reader = csv.reader(f)\n state_id_mapping = {}\n \n # Skip the header\n next(reader)\n\n for row in reader:\n state_id = row[1]\n state_name = row[2]\n state_id_mapping[state_name] = state_id\n \n return state_id_mapping", "def from_csv(self, path_to_load):\n import pandas as pd\n\n df = pd.read_csv(path_to_load)\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Remove unnnamed\n\n self.results['cids'] = list()\n self.results['differences'] = list()\n self.results['experimental_values'] = list()\n\n pd_dict = df.to_dict()\n length = len(pd_dict['cids'])\n for cid in [pd_dict['cids'][i] for i in range(0, length)]:\n self._results['cids'].append(cid)\n for cid in [pd_dict['differences'][i] for i in range(0, length)]:\n self._results['differences'].append(cid)\n for cid in [pd_dict['experimental_values'][i]\n for i in range(0, length)]:\n self._results['experimental_values'].append(cid)", "def CSVReader(self, input_file):\n f = open(input_file, 'r')\n reader = csv.reader(f)\n headers = reader.next()\n reader = csv.DictReader(f, headers)\n return reader", "def load_id_state_mapping():\n\n MAPPING_CSV = \"./locations.csv\"\n with open(MAPPING_CSV) as f:\n reader = csv.reader(f)\n id_state_mapping = {}\n \n # Skip the header\n next(reader)\n\n for row in reader:\n state_id = row[1]\n state_name = row[2]\n id_state_mapping[state_id] = state_name\n \n return id_state_mapping", "def _read_headers(self, fp):\n d = {}\n if isinstance(fp, str):\n csvfile = open(fp, newline=\"\")\n else:\n csvfile = fp\n csvreader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n for row in csvreader:\n if row[0].isdigit():\n break\n else:\n d[row[0]] = row[1:]\n\n return d", "def read_file_convert_dict(file: str) -> dict:\n states_code = pd.read_csv(file)\n states_code = states_code.set_index('abbreviation')\n dict_y = states_code['state'].to_dict()\n return dict_y", "def read_relevance_from_csv(corpus):\n filename = config.CORPUS[corpus]['relevance_file']\n relevance_dict = dict()\n if os.path.exists(filename):\n print('reading from relevance csv')\n with open(filename, 'r') as data_file:\n reader = csv.reader(data_file)\n for row in reader:\n relevance_dict[row[0]] = (ast.literal_eval(row[1]), ast.literal_eval(row[2]))\n return relevance_dict\n\n return {}", "def cell_map_from_csv(self, source_file: str) -> None:\n if source_file[-4:] == '.csv':\n try:\n self._import_source_data(source_file)\n except Exception:\n print(\"Problem with that CSV file. 
File extension?\")", "def load_csv(input_filename_state):\n dataset = {}\n with open(input_filename_state) as f:\n reader = csv.reader(f)\n header = next(reader, None)\n\n location_col = -1\n week_ahead_col = -1\n quantile_col = -1\n value_col = -1 \n\n\n for i in range(len(header)):\n if header[i] == \"place\":\n location_col = i\n elif header[i] == \"week_ahead\":\n week_ahead_col = i\n elif header[i] == \"quantile\":\n quantile_col = i \n elif header[i] == \"value\":\n value_col = i\n \n for row in reader:\n state = row[location_col]\n\n # Skip the state if it is not listed in reichlab's state list.\n if state not in STATE_ID_MAPPING:\n continue\n state_id = STATE_ID_MAPPING[state]\n week_ahead = int(row[week_ahead_col])\n quantile = row[quantile_col]\n val = max(float(row[value_col]), 0)\n if week_ahead not in dataset:\n dataset[week_ahead] = {}\n if state_id not in dataset[week_ahead]:\n dataset[week_ahead][state_id] = {}\n dataset[week_ahead][state_id][quantile] = val\n return dataset", "def parse_file(file_path): \n map = OrderedDict() \n with open(file_path) as file:\n reader = csv.reader(file, delimiter='\\t')\n headers = next(reader)\n for i in range(len(headers)):\n # print(headers[i])\n map[headers[i]]=np.array([])\n for row in reader:\n for i in range(len(row)):\n map[headers[i]]=np.append(map[headers[i]],row[i])\n return map", "def getPrefectureLocationDict(prefecture_location_file):\n\tfile_handle = open(prefecture_location_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n prefec_location_dict = {}\n counter = 0\n for row in file_reader:\n prefec_location_dict[row['PREF_NAME']] = row\n counter += 1\n assert len(prefec_location_dict.keys()) == counter\n\n file_handle.close()\n return prefec_location_dict", "def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map {}'.format(row.get('id', None)))\n yield row", "def read_server_csv_file(fname):\n data = {}\n with open(fname) as csv_data:\n csv_reader = csv.reader(csv_data)\n row_num = 0\n for row in csv_reader:\n row_num += 1\n if row[0] == 'hostname' and row_num == 1:\n continue # ignore first line if first field looks like header\n # no leading/trailing spaces in hostnames\n row[0] = row[0].strip()\n data[row[1]] = {'hostname': row[0],\n 'serial': row[1],\n 'ip': row[2],\n 'netmask': row[3],\n 'gateway': row[4]}\n return data", "def csv_dict_reader(file_obj):\n reader = csv.DictReader(file_obj, delimiter=',')\n for line in reader:\n print(line[\"first_name\"]),\n print(line[\"last_name\"])", "def get_rows(csv):\n\n labels = csv[0][2:].split(',')\n\n # Convert each row into a hash of label: value\n return [dict(zip(labels, row.split(','))) for row in csv[1:]]", "def create_waves_dict(csv_file):\n with open(csv_file) as file:\n reader = csv.DictReader(file)\n waves_dict = {row[\"Date\"]: row[\"Wave Height\"] for row in reader}\n return waves_dict", "def DictData(self):\n reader = csv.DictReader( open( self.file, \"rU\" ), dialect = \"excel\" )\n return reader", "def _read_csv(file_name):\n with open(file_name) as boards:\n rows = csv.DictReader(boards, delimiter=',', quotechar='\"')\n formatted_data = []\n for row in rows:\n formatted_data.append(dict(row))\n return formatted_data", "def getRiverIDs(lookupCsv):\n\n d = {}\n with open(lookupCsv, \"rb\") as f:\n reader = csv.reader(f)\n\n # Discard header row\n reader.next()\n\n for row in reader:\n d[row[0]] = row[1]\n\n return d", "def dictionary_formation():\r\n 
sales_data = {}\r\n with open('beer_data.csv', \"r\") as data_file:\r\n file_contents = csv.reader(data_file, delimiter=',')\r\n #Value of lines_read used as key value for each dictionary\r\n #in sales_data\r\n lines_read = 1\r\n for line in file_contents:\r\n if lines_read == 1:\r\n lines_read = lines_read + 1\r\n else:\r\n #Stores each column in row as key value in dictionary\r\n sales_data[str(lines_read)] = {\r\n \"invoice_number\": line[0],\r\n \"customer\": line[1],\r\n \"date_required\": line[2],\r\n \"recipe\": line[3],\r\n \"gyle_number\": line[4],\r\n \"quantity_ordered\": int(line[5])\r\n }\r\n lines_read = lines_read + 1\r\n data_file.close()\r\n return sales_data", "def load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n # skipping first row (header)\n next(csvreader)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)", "def readData(filename):\r\n data_d = {}\r\n with open(filename) as f:\r\n df = pd.read_csv(f, header=0, dtype='str',sep=';')\r\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\r\n df_dict = df.to_dict(orient='index')\r\n for i,val in df_dict.iteritems(): \r\n clean_row = [(k, p.proc(v)) for (k, v) in val.iteritems()]\r\n row_id = val['line_nr']\r\n data_d[row_id] = dict(clean_row)\r\n return data_d\r\n return df", "def open_some_data(the_file_name: str) -> dict:\n\n result: dict = open_csv(file_name=the_file_name)\n return result", "def _load_price_csv(symbol):\n with open(f\"data_public/prices-{symbol}.csv\", \"r\") as csvfile:\n price_by_date = {}\n reader = csv.reader(csvfile, delimiter=',')\n next(reader) # discard header\n for row in reader:\n price_by_date[row[0]] = float(row[1])\n return price_by_date", "def read_csv(self, inputfile):\n d = csv.reader(inputfile)\n for row in d.read():\n self.translations[row[0]] = row[1]", "def parse_csvfile(self, csvfile):\n\n logging.info(\"Parseing csvfile: %s\" % basename(csvfile))\n fields = []\n data = {}\n try:\n with open(csvfile) as f:\n for line in f:\n line = line.strip()\n # Skip empty or commented line\n if not line or line[0] == \"#\":\n continue\n if not fields:\n # The first valid line defines fields.\n fields = [x.strip() for x in line.split(\",\")]\n for f in self.REQUIRED_FIELDS:\n if f not in fields:\n logging.error(\"Failed to find %s field. \"\n \"Aborted.\" % f)\n sys.exit(1)\n else:\n # The rest lines are data\n values = [x.strip() for x in line.split(\",\")]\n record = {}\n for k, v in zip(fields, values):\n record[k] = v\n # Convert date time string to epoch seconds\n record[\"time_h\"] = self.parse_timestr(record[\"time_h\"])\n node = record[\"name\"]\n if data.get(node, None):\n data[node].append(record)\n else:\n data[node] = [record]\n except Exception as e:\n logging.exception(\"Failed to parsing the csvfile. 
\"\n \"See stack trace below:\")\n sys.exit(1)\n\n # While it didn't occur often, I observed that data in CSV files\n # generated by cbtool monextrac command were not in time order.\n # So sort them.\n logging.debug(\"Sorting the data\")\n for node in data.keys():\n data[node].sort(lambda x, y: cmp(int(x[\"time\"]), int(y[\"time\"])))\n\n return data, fields", "def getUserDict(user_file):\n file_handle = open(user_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n user_dict = {}\n counter = 0\n for row in file_reader:\n user_dict[row['USER_ID_hash']] = row\n counter += 1\n assert len(user_dict.keys()) == counter\n\n file_handle.close()\n return user_dict", "def load_from_file_csv(cls):\n try:\n with open(cls.__name__ + \".csv\", \"r\") as f:\n ld = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, val in row.items():\n row[key] = int(val)\n ld.append(row)\n return [cls.create(**item) for item in ld]\n except FileNotFoundError:\n return []", "def loadData(self, aircraftCSV='aircraft.csv'):\n aircraftDict = {}\n \n with open(aircraftCSV, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for line in reader:\n #if imperial convert to metric\n if line[2] == 'imperial':\n range = float(line[4]) * 8 / 5\n else:\n range = float(line[4])\n aircraftDict[line[0]] = [line[1], line[3], range]\n self.aircraftDict = aircraftDict", "def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary", "def make_dict():\n\n problems = {}\n\n with open('problems.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n cc_name = row['cc_name']\n url_link = row['url_link']\n problems[cc_name] = url_link\n\n return problems", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def read_2tuple_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n # use tuple of company (i.e., VEST01, etc) and item\r\n # companies have different prices\r\n dictionaryoutput[(item[0], item[1])] = item[2]\r\n return dictionaryoutput", "def read_csv_rows(path: str) -> list[dict[str, str]]:\n file_handle = open(\"survey\", encoding=\"utf8\")\n csv_reader = DictReader(file_handle)\n rows: list[dict[str, str]] = []\n for row in csv_reader:\n rows.append(row)\n file_handle.close()\n return rows", "def read_data_from_csv(csv_file, header=None, **kwargs):\n if os.path.isabs(csv_file) == False:\n path_to_csv = os.path.join(csv_file)\n else:\n path_to_csv = csv_file\n row_list = []\n if \"field_sep\" not in kwargs.keys():\n field_sep = ','\n else:\n field_sep = kwargs.get(\"field_sep\")\n with open(path_to_csv, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=field_sep, 
fieldnames=header)\n for record in csv_reader:\n if list(record.values())[0].startswith(\"#\") is not True:\n # IT'S A COMMENT IF IT STARTS WITH \"#\" \n # IF THIS IS YOUR HEADER ROW, SUPPLY A LIST OF COLUMN NAMES WHEN CALLING THE FUNCTION\n row_list.append(record)\n return row_list", "def getCouponDict(coupon_file):\n file_handle = open(coupon_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n counter = 0\n coupon_dict = {}\n for row in file_reader:\n coupon_dict[row['COUPON_ID_hash']] = row\n counter += 1\n assert len(coupon_dict.keys()) == counter\n\n file_handle.close()\n return coupon_dict", "def read_tags(csv_path):\n tags = {}\n with open(csv_path, 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n tags[row['image_name']] = row['tags'].split()\n return tags", "def read_csv(product_name=str, directory=DIRS['EOIR_DATA_DIR']):\n filename = ('%s.csv' % product_name)\n path = get_dir(os.path.join(directory, filename))\n with io.open(path, mode='r', encoding='utf-8-sig') as f:\n spec_dict = {}\n filtered = (line.replace(\"\\n\", '') for line in f) # Removes \\n from the created as a byproduct of encoding\n for line in filtered:\n field, value = line.split(',')\n if has_number(value) and value.find('\"') == -1:\n if value.find('x') != -1:\n if value.find('.') != -1:\n value = [float(i) for i in value.split('x')]\n else:\n value = [int(i) for i in value.split('x')]\n else:\n value = float(value)\n else:\n value = value.replace('\"', '')\n if value.find('/') != -1:\n value = [str(i) for i in value.split('/')]\n elif (value.lower()).find('true') != -1:\n value = True\n elif (value.lower()).find('false') != -1:\n value = False\n else:\n value = str(value)\n spec_dict['%s' % str(field)] = value\n f.close()\n return spec_dict" ]
[ "0.79064244", "0.7794513", "0.769313", "0.7455252", "0.73723876", "0.73611784", "0.73122406", "0.7276127", "0.72540885", "0.7231248", "0.7229994", "0.7220004", "0.7188417", "0.7093654", "0.7093654", "0.70798486", "0.70293057", "0.70100296", "0.6991526", "0.69660616", "0.69464505", "0.6916043", "0.6906446", "0.68984413", "0.6817233", "0.6790163", "0.67362744", "0.67073095", "0.6682901", "0.66325384", "0.6625556", "0.6618834", "0.6613472", "0.6606508", "0.6600292", "0.6578678", "0.6577078", "0.6551332", "0.65488356", "0.65321034", "0.65229475", "0.64954233", "0.6483154", "0.646959", "0.6454332", "0.6453392", "0.64450896", "0.6431119", "0.64272124", "0.64177203", "0.6403682", "0.6396027", "0.63849324", "0.6361197", "0.6345101", "0.63435864", "0.6341396", "0.62969786", "0.6292448", "0.62623525", "0.626231", "0.62579924", "0.6249829", "0.6233172", "0.6230846", "0.62178147", "0.621113", "0.6198156", "0.6196726", "0.6195861", "0.6189485", "0.61817026", "0.61655843", "0.61646247", "0.6144922", "0.61398816", "0.6125157", "0.6124593", "0.6114931", "0.6110455", "0.6108823", "0.60835505", "0.60795385", "0.6073187", "0.6071996", "0.60583127", "0.60515696", "0.6036126", "0.60265225", "0.600278", "0.59966964", "0.5996106", "0.5979744", "0.5979414", "0.5973037", "0.59637773", "0.5958329", "0.5940701", "0.5932514", "0.59309685" ]
0.7192429
12
Write a dictionary to a CSV file with the key as the first column.
def write_dict_to_csv(filename, data):
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile)
        keys = sorted(data.keys())
        for key in keys:
            value = data[key]
            row = [str(key)] + [str(v) for v in value]
            writer.writerow(row)
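A minimal usage sketch for the function above, assuming each dictionary value is an iterable of cell values; the file name and data are invented for illustration, and `import csv` is assumed to be in scope for the snippet:

import csv  # the function above relies on the csv module being imported

# Hypothetical input: one row per key, key written as the first column.
scores = {
    "alice": [90, 85, 88],
    "bob": [72, 80, 79],
}
write_dict_to_csv("scores.csv", scores)
# scores.csv then contains:
# alice,90,85,88
# bob,72,80,79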
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def WriteDictToCSV(csv_file,csv_columns,dict_data):\n with open('history.csv','wb') as csvfile:\n w = csv.writer(csvfile)\n w.writerows(dict_data.items())\n\n return", "def write_csv(dictionary):\n\n with open(constant.METADATA_FILE, 'a', newline='') as file:\n writer = csv.DictWriter(file, fieldnames=dictionary.keys())\n try:\n writer.writerow(dictionary)\n except Exception:\n writeable = {k: str(v).encode(\"utf-8\") for k, v in dictionary.items()}\n writer.writerow(writeable)", "def write_to_csv(data_in_dictionary, csvfile):\n with open(csvfile, 'wb') as f:\n fieldnames = data_in_dictionary.keys()\n writer = csv.DictWriter(f, fieldnames)\n writer.writeheader()\n writer.writerow(data_in_dictionary)\n logger.info(\"Data written to file: \" + csvfile)", "def write_csv(fn, toCSV):\n keys = toCSV[0].keys()\n with open(fn, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(toCSV)", "def write_csv(self, _dict, filename):\n with open(filename, 'w') as f:\n f.write('\"'+'\";\"'.join(_dict.keys())+'\"\\n')\n for i in np.arange(len(_dict[list(_dict.keys())[0]])):\n values = []\n for col in _dict.keys():\n try:\n values.append(str(_dict[col][i]))\n except IndexError as e:\n # LSTM don't have first times available because of lacking history\n pass\n f.write(';'.join(values)+'\\n')\n\n logging.info('Wrote {}'.format(filename))\n self._upload_to_bucket(filename, filename)", "def exportDict (dict, nameOfFile):\n dictTable = pd.DataFrame(dict)\n dictTable.to_csv(nameOfFile)", "def dict_to_csv(filename, dictionary, paramdict=True):\n if paramdict:\n filename += '_params'\n \n with open(filename + '.csv', 'w') as f:\n for key in dictionary.keys():\n if type(dictionary[key]) == tuple:\n f.write(\"%s,%s, %s \\n\"%(key, dictionary[key][0], dictionary[key][1]))\n else:\n f.write(\"%s,%s \\n\"%(key, dictionary[key]))\n print('Saved %s.csv.'%filename)", "def writeToCsv(news_dict, f_name):\n\n f_data = open(f_name, \"wb\")\n writer = csv.writer(f_data)\n for k in news_dict:\n writer.writerow([k, news_dict[k].replace(\",\", \"\")])\n f_data.close()", "def csv_dict_writer(path, fieldnames, data):\n with open(path, \"wb\") as out_file:\n writer = csv.DictWriter(out_file, delimiter=',', fieldnames=fieldnames)\n writer.writeheader()\n for row in data:\n writer.writerow(row)", "def csv_dict_writer(path, fieldnames, data):\n with open(path, \"wb\") as out_file:\n writer = csv.DictWriter(out_file, delimiter=',', fieldnames=fieldnames)\n writer.writeheader()\n for row in data:\n writer.writerow(row)", "def dict2csv(dictlist, csvfile):\n f = open(csvfile, 'wb')\n\n fieldnames = dictlist[0].keys()\n\n csvwriter = csv.DictWriter(f, delimiter=',', fieldnames=fieldnames)\n csvwriter.writerow(dict((fn, fn) for fn in fieldnames))\n for row in dictlist:\n csvwriter.writerow(row)\n fn.close()", "def write_csv(data):\n\n with open('data.csv', 'w') as file:\n writer = csv.DictWriter(file, fieldnames=data[0].keys())\n writer.writeheader()\n for row in data:\n writer.writerow(row)", "def make_csv(data_dict):\r\n fieldnames = ['name'] + data_dict.itervalues().next().keys()\r\n with open('data.csv', 'w') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\r\n writer.writeheader()\r\n for record in data_dict:\r\n person = data_dict[record]\r\n person['name'] = record\r\n assert set(person.keys()) == set(fieldnames)\r\n writer.writerow(person)", "def make_csv(data_dict):\n fieldnames = ['name'] + data_dict.itervalues().next().keys()\n with 
open('data.csv', 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for record in data_dict:\n person = data_dict[record]\n person['name'] = record\n assert set(person.keys()) == set(fieldnames)\n writer.writerow(person)", "def save(filename, data, keys):\n with open(filename,'w') as fd:\n csv_out = csv.writer(fd, delimiter=',', quotechar='\"')\n csv_out.writerow(keys)\n for row in zip( *[data[k] for k in keys] ):\n csv_out.writerow(row)", "def write_arr_to_csv(arr, filename):\n keys = arr[0].keys()\n with open(f\"{filename}.csv\", \"w\", newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(arr)", "def write_csv(filename, **values):\n writeheader = not os.path.isfile(filename)\n fieldnames = sorted(values.keys())\n\n with open(filename, 'a') as f:\n writer = csv.DictWriter(f, fieldnames, dialect='excel-tab')\n if writeheader:\n writer.writeheader()\n writer.writerow(values)", "def save(csv_dict, path):\n with open(path, \"wb\") as f:\n writer = csv.writer(f)\n \n # first, the headers\n header_index_map = {}\n header_row = []\n \n keys = csv_dict.keys()\n keys.sort() \n keys = keys\n \n i = 0\n for header in keys:\n header_row.append(header)\n header_index_map[header] = i\n i += 1\n writer.writerow(['id'] + header_row)\n \n # now, each item id\n ids = csv_dict[keys[1]].keys()\n ids.sort()\n for id in ids:\n item_row = [id] + [None] * len(csv_dict.keys())\n for header, i in header_index_map.iteritems():\n i += 1\n v = [c for c in csv_dict[header][id] if c is not None]\n item_row[i] = \"||\".join(v)\n if item_row[i] is not None:\n item_row[i] = item_row[i].encode('ascii', 'ignore')\n writer.writerow(item_row)", "def traces_to_csv(filename, dictionary):\n \n headings = []\n for key_string in list(dictionary.keys()):\n headings.append(key_string)\n \n # Use first element of dict to determine array length\n length = len(dictionary[headings[0]])\n filename += '_traces'\n with open(filename + '.csv', 'w') as f:\n f.write(','.join(headings))\n f.write('\\n')\n for i in range(length):\n values = []\n for key in dictionary.keys():\n values.append(dictionary[key][i].astype(str))\n f.write(','.join(values))\n f.write('\\n')\n print('Saved %s.csv.'%filename)", "def dict_print(self, output_file = \"dict.csv\"):\t\n\n\t\twith codecs.open(output_file,\"w\",encoding='utf-8') as f:\n\t\t\tfor (v,k) in self.token_key.items(): f.write(\"%s,%d\\n\" % (v,k))", "def write_csv(table: Table, file: str, header: Sequence[str] = None) -> None:\n fieldnames = list(table[0].keys())\n for hdr in reversed(header):\n if hdr in fieldnames:\n fieldnames.remove(hdr)\n fieldnames.insert(0, hdr)\n\n with open(file, \"w\", encoding=\"utf-8-sig\", errors=\"replace\", newline=\"\") as csvfile:\n writer = DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for row in table:\n writer.writerow(row)", "def writeToMonthCsv(news_dict):\n\n for k in news_dict:\n output_f = open(k + \".csv\", \"wb\")\n writer = csv.writer(output_f)\n writer.writerow([news_dict[k].replace(\",\", \"\").encode(\"utf-8\")])\n output_f.close()", "def write_csv(arr, product, file_path):\n os.chdir(file_path)\n keys = arr[0].keys()\n now = datetime.now()\n file_name = product + now.strftime(\"%m%d%y_%H%M\") + '.csv'\n try:\n with open(file_name, \"w\", newline='', encoding='utf-8') as a_file:\n dict_writer = csv.DictWriter(a_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(arr)\n a_file.close()\n except 
OSError:\n # file not found\n print(f\"File: ${file_name} not found\")\n return file_name", "def csv_writer(data, directory, filename):\n # Create path\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n fieldnames = []\n for name, value in data[0].items():\n fieldnames.append(name)\n\n with open(os.path.join(directory, filename), 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames, lineterminator = '\\n')\n writer.writeheader()\n writer.writerows(data)", "def csv_writer(data, directory, filename):\n # Create path\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n fieldnames = []\n for name, value in data[0].items():\n fieldnames.append(name)\n\n with open(os.path.join(directory, filename), 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames, lineterminator = '\\n')\n writer.writeheader()\n writer.writerows(data)", "def csv_writer(data, directory, filename):\n # Create path\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n fieldnames = []\n for name, value in data[0].items():\n fieldnames.append(name)\n\n with open(os.path.join(directory, filename), 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames, lineterminator = '\\n')\n writer.writeheader()\n writer.writerows(data)", "def csv_writer(data, directory, filename):\n # Create path\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n fieldnames = []\n for name, value in data[0].items():\n fieldnames.append(name)\n\n with open(os.path.join(directory, filename), 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames, lineterminator = '\\n')\n writer.writeheader()\n writer.writerows(data)", "def write_to_csv(self, data_points):\n keys = data_points[0].keys()\n with open(self.report_path, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(data_points)", "def csvWrite(self, data, csvFileName):\n\twith open(csvFileName, 'w') as csv_file:\n\t\twriter = csv.writer(csv_file)\n\t\t\tfor key, value in data.items():\n\t\t\t\twriter.writerow([key,value])", "def dict_to_csv(total_distance, algorithm):\n # Creates or overwrites new csv from dict\n with open(f'results/visualisatie/results_{algorithm}_distance.csv', 'w', newline='') as infile:\n fields = ['Run', 'Total Distance']\n writer = csv.DictWriter(infile, fieldnames=fields)\n writer.writeheader()\n\n writer = csv.writer(infile, delimiter=',')\n input = total_distance\n for key, value in input.items():\n writer.writerow([key, value])", "def write_csv(f, extract, fields=None):\n keys = fields if fields is not None else extract[0].keys()\n dict_writer = csv.DictWriter(f, keys, extrasaction='ignore')\n dict_writer.writeheader()\n dict_writer.writerows(extract)\n return f", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def export_label_index_dict(label_index_dict):\r\n\tcsv_file = open('output.csv', 'w')\r\n\twriter = csv.writer(csv_file)\r\n\trow = ''\r\n\theader = 'Type,Prediction\\n'\r\n\tcsv_file.write(header)\r\n\tfor key in label_index_dict.keys():\r\n\t\trow = key + ',' + label_index_dict[key] + '\\n'\r\n\t\tcsv_file.write(row)", "def write_csv(file_name, data, column_names):\n with open(file_name, \"a\") as f:\n writer = csv.DictWriter(f, fieldnames=column_names)\n writer.writerow(data)", "def store_csv(self):\n\n with 
open(self.filepath.with_suffix(\".csv\"), 'w',\n newline='') as csvfile:\n fieldnames = ['counter', 'timestamp', 'acceleration']\n writer = DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n writer.writerows(self.values)", "def create_csv(csv_path, headers):\n with open(csv_path, 'w') as csv_file:\n writer = DictWriter(csv_file, fieldnames=headers)\n writer.writeheader()", "def csv_writer(data, path, arr):\n with open(path, \"w\", newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames = arr)\n for line in data:\n writer.writerow(line)", "def write_csv_label(labels, csv_file):\n with open(csv_file, 'w') as f:\n writer = csv.writer(f)\n for key, value in labels.items():\n writer.writerow([key, value])", "def save_csv_file(votes: dict) -> None:\r\n with open(\"votingList.csv\", \"w\", newline=\"\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerow([\"First Name\", \"Last Name\", \"Vote\"])\r\n for vote in votes.keys():\r\n entry = votes[vote]\r\n fst, snd = vote.split()\r\n writer.writerow([fst, snd, entry])", "def to_csv(list_dicts, file_name):\n # We assume that all the dictionaries have the same keys\n fieldnames = list_dicts[0].keys()\n\n with open(file_name, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, fieldnames)\n dict_writer.writeheader()\n dict_writer.writerows(list_dicts)", "def save_csv(outputfile):\n with open(outputfile, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(DATA_KEYS)\n\n # Add data to csv-file\n for data in data_list:\n writer.writerow(data)", "def writeDB(db):\n with open(filenameDB, \"w\") as csvfile:\n writer = csv.writer(csvfile)\n for k, v in db.items():\n writer.writerow([v, k])", "def dictKeysToCSV(d):\n return \",\".join([str(val) for val in nestedDictKeys(d)])", "def export_csv(data: dict, file_path: str):\n fieldnames = [\n 'briefy_id', 'number_required_assets', 'number_submissions', 'total_submissions_images',\n 'total_submissions_videos', 'total_submissions_others', 'total_archive_images',\n 'total_archive_videos', 'total_archive_others', 'total_delivery_images',\n 'total_delivery_videos', 'total_delivery_others', 'submission_links', 'archive_link',\n 'delivery_link', 'order_link'\n ]\n\n with open(file_path, 'w') as fout:\n writer = csv.DictWriter(fout, fieldnames)\n writer.writeheader()\n for key, value in data.items():\n writer.writerow(value)", "def write_csv(records, make_row, filename):\n def fieldnames(row):\n return [field[0] for field in row]\n\n writer = None\n\n with open(filename, \"w+\") as f:\n for record in records:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n row = make_row(record)\n if writer is None:\n writer = csv.DictWriter(f, fieldnames=fieldnames(row))\n writer.writeheader()\n writer.writerow(dict(row))", "def write_output_csv(filename, **kwargs):\n import csv\n import time\n\n intermediate = kwargs.pop(\"intermediate\", False)\n\n keys = sorted(kwargs.keys())\n num_vars = len(keys)\n\n if intermediate:\n full_filename = filename + \"_interm\"\n else:\n dot_index = filename.rfind('.')\n if dot_index != -1:\n full_filename = (filename[:dot_index]\n + time.strftime(\"%Y-%m-%d-%H.%M.%S\")\n + filename[dot_index:])\n else:\n full_filename = filename + time.strftime(\"%Y-%m-%d-%H.%M.%S\")\n\n # add current time to filename as an identifier\n with open(full_filename, 'w', newline='') as csvfile:\n\n writer = csv.writer(csvfile)\n\n # write header\n writer.writerow(keys)\n\n num_entries = len(kwargs[keys[0]])\n for i in 
range(num_entries):\n writer.writerow(kwargs[keys[j]][i] for j in range(num_vars))", "def uph_write(dictionary, fname, sep='\\t'):\n import csv\n\n col = ('time', 'zone', 'R', 'V')\n with open(fname, 'w', newline='') as f:\n writer = csv.writer(f, delimiter=sep, quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['# {:^8s}'.format(x) for x in col])\n for i, (row) in enumerate(zip(*[dictionary[c] for c in col])):\n writer.writerow(['{:8.3e}'.format(x) for x in row])", "def export_csv(header, data):\n with StringIO() as tmp:\n writer = csv.DictWriter(tmp, fieldnames=header)\n writer.writeheader()\n writer.writerows(data)\n data = tmp.getvalue()\n\n return data", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def csv_write (data):\n \n csv_data=data[0:]\n csv1_data = open('backup.csv', 'a')\n csvwriter = csv.writer(csv1_data)\n\n count = 0\n\n for i in csv_data:\n if count == 0:\n header = i.keys()\n csvwriter.writerow(header)\n count += 1\n csvwriter.writerow(i.values())\n\n csv1_data.close()\n\n #http://blog.appliedinformaticsinc.com/how-to-parse-and-convert-json-to-csv-using-python/", "def csvWriter(data, out_file):\n print '[+] Writing CSV output.'\n logging.info('Writing CSV to ' + out_file + '.')\n headers = ['ID', 'Name', 'Path', 'Session ID', 'Count', 'Last Used Date (UTC)', 'Focus Time (ms)', 'Focus Count']\n\n with open(out_file, 'wb') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=headers, extrasaction='ignore')\n # Writes the header from list supplied to fieldnames keyword argument\n writer.writeheader()\n\n for i, dictionary in enumerate(data):\n # Insert the 'ID' value to each dictionary in the list. Add 1 to start ID at 1 instead of 0.\n dictionary['ID'] = i + 1\n # Convert the FILETIME object in the fourth index to human readable value\n dictionary['Last Used Date (UTC)'] = fileTime(dictionary['Last Used Date (UTC)'])\n writer.writerow(dictionary)\n\n csvfile.flush()\n csvfile.close()\n msg = 'Completed writing CSV file. 
Program exiting successfully.'\n print '[*]', msg\n logging.info(msg)", "def write_row(row: dict):\n row = {k: format_float(v) for k, v in row.items()}\n writer.writerow(row)\n csvfile.flush()", "def write_csv(estimates: ListOfDicts, output_csv: str) -> None:\n with open(output_csv, \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=estimates[0].keys())\n writer.writeheader()\n for row in estimates:\n writer.writerow(row)\n logging.info(f\"Wrote estimates as {output_csv}\")", "def write_log(logfile, log_dict):\n with open(logfile, 'a') as f:\n c = csv.writer(f)\n if log_dict['epoch'] == 0: # write header for first epoch (dubbed as 0th epoch)\n c.writerow(log_dict.keys())\n\n c.writerow(log_dict.values())", "def writeCSV(filename, separator, data):\n \n filetowrite = open(filename, \"w\")\n values = []\n i = 0 #Count the number of objects already written\n for item in data:\n filetowrite.write(item)\n i += 1\n if i < len(data.keys()):\n filetowrite.write(separator)\n values.append(data[item])\n filetowrite.write(\"\\n\")\n i = 0\n for value in values:\n filetowrite.write(str(value))\n i += 1\n if i < len(values):\n filetowrite.write(separator)\n \n filetowrite.close()", "def write_dicts_to_csv(dict_list, output_file, fields=None, delimiter=',', verbose=True,\n append_to_file=False):\n if not dict_list:\n return\n with open(output_file, 'a' if append_to_file else 'w') as f:\n if not fields:\n fields = []\n for d in dict_list:\n fields.extend([key for key in d.keys() if key not in fields])\n writer = csv_unicode.UnicodeDictWriter(f, fields, delimiter=delimiter)\n if not append_to_file:\n writer.writeheader()\n writer.writerows(dict_list)\n else:\n writer = csv_unicode.UnicodeDictWriter(f, fields, delimiter=delimiter)\n if not append_to_file:\n writer.writeheader()\n for row in dict_list:\n d = OrderedDict([(key_field, value) for key_field, value in row.items() \\\n if key_field in fields])\n writer.writerow(d)\n if verbose:\n print 'File %s written.' % output_file", "def write_dict(outputfilename, dictionary):\r\n # May want to modify this code to pickle the key and value and alter the read dictionary to do the same.\r\n try:\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n for key, value in dictionary.items():\r\n outfile.write('%s,%s\\n' % (key, value))\r\n except:\r\n input(\"File still open! 
Please close and press enter to continue\")\r\n with open(outputfilename, 'w', newline='', encoding='utf-8') as outfile:\r\n for key, value in dictionary.items():\r\n outfile.write('%s,%s\\n' % (key, value))", "def writeToCsv(clue):\n filename = 'new_clue_import_for_editing.csv'\n f = open(filename, 'w')\n fieldnames = list(set([m['Clue_field'] for m in mapping]))\n fieldnames.append('date')\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for c in clue:\n writer.writerow(c)\n f.close()", "def save_dataset_csv(self, path):\n cols = list(self.data_dict.keys())\n df = pd.DataFrame(self.data_dict, index=None, columns=cols)\n df.to_csv(path, index=True)", "def csv_from_dicts(fields: List[str], *dicts: dict, output: str = ''):\n\n if not dicts:\n logger.warning(f'No data to write .csv file')\n return\n\n if not output:\n _timestamp = datetime.now().strftime(\"%Y_%m_%d_%H%M%S\")\n output = f'{_timestamp}.csv'\n\n with open(output, 'w+') as f:\n writer = csv.DictWriter(f, fieldnames=fields)\n writer.writeheader()\n writerow = writer.writerow\n\n for d in dicts:\n writerow(d)\n\n logger.info(f'CSV created: {output}')", "def each_to_csv(data, key, value):\n data.to_csv(\"camelot/clean/nrld_{}_{}.csv\".format(key, value), index=False)\n return data", "def to_csv(self, filename, **csv_args):\n dataframe = pd.DataFrame.from_dict(self.to_dict())\n return dataframe.to_csv(filename, **csv_args)", "def outputapidata_csv(filename, data, headers=None):\n with open(filename,'w',encoding='utf-8',newline = \"\", ) as f:\n if headers:\n writer = csv.DictWriter(f,fieldnames = headers)\n writer.writeheader()\n else:\n writer = csv.DictWriter(f)\n writer.writerows(out)", "def save_data(data, filename):\r\n with open('C:\\\\Users\\\\kevin\\\\Documents\\\\' + filename, 'w', newline='') as out:\r\n csv_out = csv.writer(out)\r\n if filename == 'fields.csv':\r\n for key in data:\r\n csv_out.writerow((key, data[key]))", "def write_dict_txtfile(input_file_name, data_dict):\n \n output_file = open(input_file_name, 'w')\n output_file.write('Human Metabolome database')\n output_file.write('\\n\\n')\n\n for keys, values in data_dict.items():\n output_file.write(str(keys)+', '+str(values)+'\\n')", "def write_file(poet, info_dict):\r\n\r\n filename = SAVE_PATH + '/' + poet + '/' + str(info_dict['id']) + '_'+ str(info_dict['pagenum']) \\\r\n + '_' + info_dict['id2'] +'_' + info_dict['ord2'] \\\r\n + '_' + info_dict['id3'] + '_' + info_dict['ord3'] \\\r\n + '_' + info_dict['id4'] + '_' + info_dict['ord4'] + '.txt'\r\n\r\n print(filename)\r\n with open(filename, 'w', encoding='utf-16') as f:\r\n txt = ','.join([str(info_dict[k]) for k in KEYS ])\r\n txt = txt + '\\n' + '\\n'.join([x for x in info_dict['beyts']])\r\n f.write(txt)\r\n\r\n\r\n locale.setlocale(locale.LC_ALL, '')\r\n DELIMITER = ';'# if locale.localeconv()['decimal_point'] == ',' else ','\r\n\r\n list_of_lists = [[info_dict[k] for k in KEYS]]\r\n with open('D:/poem/molana.csv', 'a', newline='', encoding='utf-16') as csvfile:\r\n\r\n writer = csv.writer(csvfile, delimiter=DELIMITER)\r\n writer.writerows(list_of_lists)", "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def writerow(self, outdict):\r\n row = []\r\n for field in FatWriter.FIELDS:\r\n col = outdict.get(field, '')\r\n col = col.replace('\\t', ' ')\r\n col = 
col.replace('\\n', '\\\\n')\r\n row.append(col)\r\n self.outfile.write('\\t'.join(row) + '\\n')", "def write_csv(self, filelike):\r\n items = self.rows()\r\n writer = unicodecsv.writer(filelike, encoding=\"utf-8\")\r\n writer.writerow(self.header())\r\n for item in items:\r\n writer.writerow(item)", "def write_csv(fd, data):\n # df = pd.DataFrame.from_dict(data)\n df = pd.io.json.json_normalize(data)\n print(df.to_csv(index=False), file=fd)", "def write_to_csv(agents, filename):\n log.info(\"Writing CSV file '%s'...\" % filename)\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=place_detail_keys)\n writer.writeheader()\n writer.writerows(agents)", "def create_notice_csv(d, f):\n header = list()\n for i in d[0].keys():\n if i != \"search_details\":\n try:\n for j in d[0][i].keys():\n header.append(i+\".\"+j)\n except:\n header.append(i[1:])\n\n data = list()\n for e in d:\n row = list()\n for i in header:\n keys = i.split(\".\")\n if len(keys) == 2:\n row.append(e[keys[0]][keys[1]])\n elif len(keys) == 1:\n row.append(e[\"_\" + keys[0]])\n else:\n continue\n\n data.append(row)\n\n with open(f, 'wb') as csv_file:\n csvwriter = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC)\n csvwriter.writerow(header)\n for row in data:\n csvwriter.writerow(row)\n return True", "def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n self.__create_csv()\n try:\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writerow(self.__values)\n except IOError: # this exception avoid a product does not have saved in csv file\n time.sleep(0.5)\n self.save_csv()\n # display on the screen what is being record on csv\n for key, value in self.__values.items():\n print('{}: {}'.format(key, value), end='; ' if key != 'url' else '\\n')", "def serialise(rows: Iterable[Dict], path: pathlib.Path, **kwargs):\n\n fieldnames = settings.OUTPUT_HEADERS\n\n LOGGER.info(\"Writing CSV with headers: %s\", fieldnames)\n\n with path.open('w', newline='') as file:\n writer = csv.DictWriter(file, fieldnames=fieldnames, dialect=UrbanDialect, **kwargs)\n\n row_count = 0\n for row in rows:\n writer.writerow(row)\n\n row_count += 1\n\n if row_count:\n LOGGER.info(\"Wrote %s rows to '%s'\", row_count, file.name)\n else:\n path.unlink()\n LOGGER.info(\"Deleted '%s'\", file.name)", "def write_csv(self, out_file_name, header):\n\n with open(out_file_name, 'wb') as outf:\n writer = csv.writer(outf, quoting=csv.QUOTE_ALL)\n writer.writerow(header)\n writer.writerows(self.records)", "def dictValuesToCSV(d):\n return \",\".join([str(val) for val in nestedDictValues(d)])", "def write_files(items, path):\n with path.open('wb') as stream:\n writer = csv.writer(stream)\n for k, v in items.iteritems():\n if not v:\n continue\n row = [k] + v.strings()\n writer.writerow(row)", "def output(items, headers, outputFile):\n\tdictToValues = lambda d: \\\n\t\tmap(lambda h: d.get(h, ''), headers)\n\n\treturn writeCsv(outputFile, map(dictToValues, items))", "def make_csv(file_of_data):\n with open(file_of_data, 'w') as f:\n writer = csv.writer(f)\n header = (\"Counter\", \"Date/time\", \"Latitude\", \"Longitude\", \"Temperature\", \"Humidity\")\n writer.writerow(header)", "def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, 
fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")", "def write_csv_file (metadata_list, csv_file, append) :\n try :\n with open (csv_file, 'a' if append else 'w' , newline='') as file :\n writer = csv.DictWriter(file, fieldnames=MetadataEntity.get_fieldnames())\n if not append: writer.writeheader()\n for e in metadata_list :\n writer.writerow(e.get_values())\n file.close()\n except :\n print ('ERROR: writing csv file: ' + csv_file)\n return False\n return True", "def write_to_csv(path,data_dict):\n\n\n schema = [\"file_name\",\"family\",\"genus\",\"genus_confidence\",\n \"species_1\",\"confidence_1\",\"hall_1\",\n \"species_2\",\"confidence_2\",\"hall_2\",\n \"species_3\",\"confidence_3\",\"hall_3\",\n \"species_4\",\"confidence_4\",\"hall_4\",\"peaks\"]\n\n # if no file exists create a one and inform the user\n if not os.path.exists(path):\n print(\"creating new output file {}\".format(path))\n with open(path, \"w\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(schema)\n\n row = []\n\n row.append(data_dict[\"file_name\"])\n row.append(data_dict[\"family\"])\n \n row.append(data_dict[\"genus_1\"])\n row.append(data_dict[\"genus_confidence_1\"][:5])\n \n row.append(data_dict[\"species_1\"])\n row.append(data_dict[\"confidence_1\"][:5])\n row.append(data_dict[\"hall_1\"])\n \n row.append(data_dict[\"species_2\"])\n row.append(data_dict[\"confidence_2\"][:5])\n row.append(data_dict[\"hall_2\"])\n\n row.append(data_dict[\"species_3\"])\n row.append(data_dict[\"confidence_3\"][:5])\n row.append(data_dict[\"hall_3\"])\n\n row.append(data_dict[\"species_4\"])\n row.append(data_dict[\"confidence_4\"][:5])\n row.append(data_dict[\"hall_4\"])\n\n row.append(data_dict[\"peaks\"])\n \n with open(path, \"a\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(row)", "def write_csv_file(filepath, fieldnames, rows):\n headers = [{'label': field} for field in fieldnames]\n with open(filepath, 'w') as f_buf:\n outfile = CsvWriter()\n outfile.set_headers(headers)\n outfile._datas = rows\n outfile.render(f_buf)", "def makeCSV(self,file_name, data, topList):\n file_name = file_name+\".csv\"\n w = csv.writer(open(file_name, \"w\"))\n w.writerow(topList)\n for key, val in data.items():\n row = list(val)\n row.insert(0,key)\n w.writerow(row)", "def save_dic(dic):\n json.dump(dic, open(\"resources/files/serials.csv\", \"w\"))\n\n\n # w = writer(open(\"resources/files/serials.csv\", \"w\", newline=\"\\n\"))\n # w.writerow(['Return Name', 'Serials'])\n # for name, serials in dic.items():\n # lst = []\n # if name == \"Return Name\":\n # lst.append(name)\n # lst.append(serials)\n # else:\n # for serial in serials:\n # if serial == \"Return Name\":\n # lst.append(serials)\n # else:\n # inner_lst = []\n # for cont in serials[serial]:\n # if cont == \"options\":\n # inner_lst.append(cont + \";;@@;;\" +\n # \";;##;;\".join(\n # serials\n # [serial]\n # [\"options\"]))\n # else:\n # inner_lst.append(\n # cont + \";;@@;;\" + serials[serial][cont])\n # lst.append(serial + ';;:::;;' + \";;!!!;;\".join(inner_lst))\n # w.writerow([(name), (';;,,,;;'.join(lst))])", "def write_scores(mos, outfile):\n with open(outfile, 'w') as csv_file:\n csv_writer = csv.writer(csv_file)\n for key in mos:\n csv_writer.writerow([key, mos[key]])", "def write_csv(self, file):\n # Write 
header row\n file.write('Timestamp,MessageType,Queue,Price,Volume,OrderID\\n')\n # Write content\n for x in self.records:\n row = (str(x[0]) + ',' + x[1][\"MessageType\"] + ',' +\n x[1][\"Queue\"] + ',' + str(x[1][\"Price\"]) + ',' +\n str(x[1][\"Volume\"]) + ',' + str(x[1][\"OrderID\"]) + '\\n')\n file.write(row)", "def write_results(results):\n fields = results[0].keys()\n with open('results.csv', 'w') as f:\n dw = csv.DictWriter(f, fieldnames=fields, delimiter='|')\n dw.writer.writerow(list(dw.fieldnames))\n dw.writerows(results)", "def save_csv(outfile, movies):\n fieldnames = ['title', 'rating', 'year', 'actors', 'runtime']\n with open('movies.csv', 'w') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=fieldnames)\n writer.writeheader()\n for line in movies:\n writer.writerow(line)\n\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK", "def writecsvfile(filename, columnnames, data):\n with open(filename, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow(columnnames) # header row\n for row in data:\n writer.writerow(row[:])", "def write_csv(d, f):\n with open(f, 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(d[0])\n for row in d[1]:\n row_encode = list()\n for x in row:\n if type(x) == unicode:\n row_encode.append(x.encode('utf8'))\n else:\n row_encode.append(x)\n writer.writerow(row_encode)\n return True", "def write(self, stream, root, order):\n writer = csv.writer(stream)\n if order:\n writer.writerow(order)\n for row in root:\n if isinstance(row, list):\n writer.writerow(row)\n else:\n if order:\n writer.writerow([row.get(name, '') for name in order])\n else:\n parser.error('Cannot write a dictionary without an order for columns')", "def sort_and_write_dict(dct, fileout, tab_space=28):\n with open(fileout, 'w') as f:\n\n # Iterate through dictionary post-sorting\n for key in sorted(dct):\n\n # Write key, values to dictionary and separate by specified spaces in tab_space\n f.write(\"{k}{v}\\n\".format(k=key.ljust(tab_space), v=dct[key]))", "def save_to_file():\n dict_from_file.update(temp_dict)\n plik=open('data.txt', 'w')\n for key in dict_from_file.keys():\n plik.write(key)\n plik.write(\" \")\n plik.write(str(dict_from_file[key][0]))\n plik.write(' ')\n plik.write(dict_from_file[key][1].replace(' ','_'))\n plik.write(' ')\n plik.write(str(dict_from_file[key][2]))\n plik.write('\\n')", "def _write_dict_to_mdin(self, f, dictionary):\n\n for key, val in dictionary.items():\n if val is not None:\n f.write(\" {:15s} {:s},\\n\".format(key+\" =\", str(val)))\n f.write(\" /\\n\")", "def write_table_to_csv(\n self,\n table: Table,\n path: str,\n header: bool = True,\n dialect: Union[str, Dialect] = Dialect.Excel,\n encoding: Optional[str] = None,\n delimiter: Optional[str] = \",\",\n ):\n self._requires_table(table)\n\n if isinstance(dialect, Dialect):\n dialect_name = dialect.value\n else:\n dialect_name = dialect\n\n with open(path, mode=\"w\", newline=\"\", encoding=encoding) as fd:\n writer = csv.DictWriter(\n fd, fieldnames=table.columns, dialect=dialect_name, delimiter=delimiter\n )\n\n if header:\n writer.writeheader()\n\n for row in table.iter_dicts(with_index=False):\n writer.writerow(row)", "def save_dict_to_file(dictionary: dict, dst_path: str) -> None:\n with io.open(file=dst_path, mode=\"w\", encoding=\"utf-8\") as dst:\n for k, v in dictionary.items():\n dst.write(f\"{k} {v}\\n\")\n dst.close()", "def export_corpus_csv(corpus,path, delimiter = ',', trans_delimiter = '.'):\n word = 
corpus.random_word()\n header = sorted(word.descriptors)\n with open(path, encoding='utf-8', mode='w') as f:\n print(delimiter.join(header), file=f)\n for key in corpus.iter_sort():\n print(delimiter.join(make_safe(getattr(key, value),trans_delimiter) for value in header), file=f)", "def saveData(filename,data):\r\n\r\n if (type(data) != dict):\r\n raise ValueError(\"The function provided did not return a single dictionary\")\r\n elif not all([isinstance(data[key], collections.Iterable) for key in data.keys()]):\r\n try:\r\n for key,value in data.items():\r\n if (type(value) != list):\r\n data[key] = [value]\r\n except Exception as exception:\r\n raise ValueError(\"The function returned a dictionary with values that arent lists {}\".format(exception))\r\n\r\n with open(filename, 'w') as csvfile:\r\n csvWriter = csv.writer(csvfile, dialect=\"excel\")\r\n\r\n columbNames = []\r\n\r\n for key in data.keys():\r\n columbNames.append(key)\r\n\r\n csvWriter.writerow(columbNames)\r\n\r\n numberRows = max([len(data[key]) for key in columbNames])\r\n coloumbValues = [coloumbValue+[None]*(numberRows-len(coloumbValue)) for coloumbValue in data.values()]\r\n\r\n for i in range(numberRows):\r\n row = [item[i] for item in coloumbValues]\r\n csvWriter.writerow(row)", "def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])" ]
[ "0.8167452", "0.80611485", "0.79419845", "0.79068524", "0.7865296", "0.786358", "0.7843565", "0.7672604", "0.7624856", "0.7592206", "0.73616445", "0.733697", "0.7334174", "0.7314796", "0.72985274", "0.7286329", "0.72482544", "0.71879077", "0.7168227", "0.71501637", "0.7062585", "0.7046527", "0.70381916", "0.7005685", "0.7005685", "0.7005685", "0.7005685", "0.6931768", "0.69314736", "0.68826896", "0.687289", "0.68313515", "0.6830471", "0.68169695", "0.68169683", "0.6787623", "0.6772172", "0.6746577", "0.6733149", "0.67007726", "0.6681511", "0.66651833", "0.6615163", "0.6595883", "0.6567776", "0.65537226", "0.65281993", "0.649608", "0.64915544", "0.6490684", "0.6484184", "0.64761907", "0.64661866", "0.64613706", "0.64347273", "0.6398523", "0.6383956", "0.6375272", "0.63739306", "0.6370039", "0.6361433", "0.63607943", "0.6345682", "0.63395774", "0.6316101", "0.6311451", "0.6294193", "0.6287044", "0.6260966", "0.6255587", "0.62402195", "0.6230296", "0.6223029", "0.62217206", "0.6215416", "0.6212239", "0.6210511", "0.6195526", "0.61821324", "0.6173098", "0.6170819", "0.61599773", "0.61477", "0.61363834", "0.6129369", "0.61256295", "0.6123626", "0.6122133", "0.6120872", "0.61170554", "0.61102706", "0.6109034", "0.609379", "0.6083162", "0.6062476", "0.60571384", "0.6052776", "0.60513306", "0.6043", "0.60395944" ]
0.77208966
7
Merge CSV files based on keywords.
def merge_csv(csv_files):
    results = dict()
    data_all = list()
    keys = set()
    for filename in csv_files:
        data, row_len = load_csv_to_dict(filename)
        keys |= set(data.keys())
        data_all.append((data, row_len))
    for key in keys:
        values = list()
        for value, row_len in data_all:
            fill = ["0"]*row_len
            dt = value[key] if key in value else fill
            values.extend(dt)
        results[key] = values
    return results
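The snippet above calls a `load_csv_to_dict` helper that is not shown. A minimal sketch of what it is assumed to do — return a mapping from each row's first column (the keyword) to the remaining cells, plus the number of those cells — could look like the following; the exact behaviour is an assumption for illustration, not part of the original snippet:

import csv

def load_csv_to_dict(filename):
    # Assumed helper: key each row by its first column, keep the rest as values.
    data = dict()
    row_len = 0
    with open(filename, newline='') as csvfile:
        for row in csv.reader(csvfile):
            if not row:
                continue
            data[row[0]] = row[1:]
            row_len = max(row_len, len(row) - 1)
    return data, row_len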
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_csv_files(filelist):\n data = tables.DictList()\n for file_name in filelist:\n reader = csv.DictReader(open(file_name))\n data += list(reader)\n\n return data", "def merge_csv_files(files: List[str], outfile: str):\n\tlogthis.say('Merging files starts.')\n\tdata: pd.DataFrame = pd.DataFrame([], columns=['Label', 'Repo', 'Text'])\n\tnum_files = len(files)\n\tfor index, file in enumerate(files):\n\t\tlogthis.say(f'Merging files {100*(index+1)/num_files:.2f}% {index+1}/{num_files}')\n\t\tdf = pd.read_csv(file, sep=';')\n\t\tdata = pd.concat([df, data])\n\tlogthis.say(f'Write data to {outfile}')\n\tdata.to_csv(outfile, sep=';', index=False)\n\tlogthis.say('Merging files done.')", "def merge_cat(UT):\n csv_path = Path(\"./catalog\"+UT+\".csv\")\n if csv_path.exists() != 1:\n Popen('rm -rf merged'+UT+'.log', shell=True)\n Popen('touch merged'+UT+'.log', shell=True)\n all_files = glob.glob(\"./results/20*/\"+UT+\"/*\")\n print('merging table: {} (1/{})'.format(all_files[0],len(all_files)))\n tab = pd.read_csv(all_files[0])\n cat = tab.copy()\n merged = open('merged'+UT+'.log','a+')\n merged.write(all_files[0]+'\\n')\n try:\n for i, file in enumerate(all_files[1:]):\n print('merging table: {} ({}/{})'.format(file,i+2,len(all_files)))\n tab = pd.read_csv(file)\n cat = pd.merge(cat, tab, how='outer')\n merged.write(file+'\\n')\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n except:\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n else:\n cat = pd.read_csv('catalog'+UT+'.csv')\n all_files = glob.glob(\"./results/20*/\"+UT+\"/*\")\n merged = list(pd.read_table('merged'+UT+'.log', header=None).values)\n merged = [i[0] for i in merged]\n if set(all_files) == set(merged):\n print('GOOD NEWS: No new table is needed to be merged.')\n else:\n non_processed = list(set(all_files) - set(merged))\n merged = open('merged'+UT+'.log','a+')\n try:\n for i, new_img in enumerate(non_processed):\n print('merging table: {} ({}/{})'.format(new_img,i+1,len(non_processed)))\n tab = pd.read_csv(new_img)\n cat = pd.merge(cat, tab, how='outer')\n merged.write(new_img+'\\n')\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n except:\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n cat = pd.read_csv('catalog'+UT+'.csv')\n m = Table(cat.values, names=cat.columns)\n hdu = fits.table_to_hdu(m)\n hdulist = fits.HDUList([fits.PrimaryHDU(), hdu])\n hdulist.writeto('catalog'+UT+'.fits', overwrite=True)", "def merge(df_list):\n df_final = pd.read_csv(df_list[0])\n for ind, df in enumerate(df_list):\n if ind >= 1:\n temp_df = pd.read_csv(df_list[ind])\n temp_df = temp_df.drop(['lbl'], axis=1)\n df_final = pd.merge(df_final, temp_df, on=['author_id'])\n final_path = os.path.join(os.path.expanduser(\"~/Desktop/Age-Detection\"), \"merged-feature-collection.csv\")\n df_final.to_csv(final_path, sep=',', index=False)\n return final_path", "def merge_files(self, infnames, outfname, csv_header=True):\n assert outfname not in infnames\n start = time.time()\n\n header = ''\n with open(outfname, 'w') as outfile:\n if csv_header: # if not <csv_header>, we'll just end up with a zero-length file\n for fname in infnames:\n if not os.path.exists(fname) or os.stat(fname).st_size == 0:\n continue\n with open(fname) as headfile:\n reader = csv.DictReader(headfile)\n writer = csv.DictWriter(outfile, reader.fieldnames)\n writer.writeheader()\n header = ','.join(reader.fieldnames)\n break\n\n cmd = 'cat ' + ' '.join(infnames) + 
' | grep -v \\'^' + header + '$\\' | sort | uniq >>' + outfname\n check_call(cmd, shell=True)\n\n if not self.args.no_clean:\n for infname in infnames:\n os.remove(infname)\n\n print ' time to merge csv files: %.3f' % (time.time()-start)", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def merge(parent_folder):\n parent_folder = Path(parent_folder)\n\n address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))\n\n frames = []\n\n #: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL\n for address_csv_file in address_csv_files:\n temp = pd.read_csv(\n address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']\n )\n\n frames.append(temp)\n\n #: merge all csv's\n merged = pd.concat(frames)\n merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')", "def _merge_files(parse_results: Iterable[ParseResult]) -> Iterable[ParseResult]:\n return map(_merge_records, groupby_file(parse_results))", "def merge_csv_daily(output_filename, path):\n\n # import csv files from folder\n allFiles = glob.glob(path + \"*.csv\")\n\n with open(output_filename, 'wb+') as outfile:\n for i, fname in enumerate(allFiles):\n with open(fname, 'rb') as infile:\n if i != 0:\n infile.readline() # Throw away header on all but first file\n # Block copy rest of file from input to output without parsing\n shutil.copyfileobj(infile, outfile)\n # print(fname + \" has been imported.\")\n\n # adding MissingObs column back:\n df = pd.read_csv(output_filename, header=0, sep=',', index_col=[0,1], parse_dates=False)\n df.insert(loc=3, column='MissingObs', value=np.zeros((df.shape[0], )))\n df.to_csv(output_filename, sep=',')\n\n return output_filename", "def merge(path_to_mf_lf_1, path_to_mf_lf_2, path_to_mf_hf_1, path_to_mf_hf_2, path_to_sf,\n path_to_mf_lf, path_to_mf_hf, path_to_sf_copy):\n mf_lf = merge_one_case(pd.read_csv(path_to_mf_lf_1, index_col=0), pd.read_csv(path_to_mf_lf_2, index_col=0))\n mf_hf = merge_one_case(pd.read_csv(path_to_mf_hf_1, index_col=0), pd.read_csv(path_to_mf_hf_2, index_col=0))\n sf = pd.read_csv(path_to_sf, index_col=0) # only bypassed\n mf_lf.to_csv(path_to_mf_lf, index=True, header=True)\n mf_hf.to_csv(path_to_mf_hf, index=True, header=True)\n sf.to_csv(path_to_sf_copy, index=True, header=True)", "def data_merge(detector_fields):\n print(\"Merging final data...\")\n\n # load files that contain phase and I/O processed data and store as dfs\n phase_data = pd.read_csv(results_folder + 'phases/processed/clean_merged_phases.csv', header=0,\n skipinitialspace=True, usecols=output_fields)\n detection_data = pd.read_csv(results_folder + 'io/io_out.csv', header=0, skipinitialspace=True,\n usecols=detector_fields)\n phase_df = pd.DataFrame(phase_data)\n detection_df = pd.DataFrame(detection_data)\n\n # merge the two files based on their Date and Time fields\n output = pd.merge(phase_df, detection_df, on=['Date', 'Time'])\n\n # store the output with any duplicates dropped and create a final CSV file\n merged_df = 
output.drop_duplicates()\n merged_df.to_csv(results_folder + 'dataset.csv', sep=',', index=False)\n\n print(\"Data merged!\")\n print(\"Main dataset available: \" + results_folder + 'dataset.csv')\n\n # return location of dataset\n return results_folder + 'dataset.csv'", "def main():\n location = os.getcwd()\n header = \"Date,Time,Voltage,Current,Isolation,Range,SoC,Distance,Fan rpm,Fan Torque,Hyd. Pump rpm,Hyd. Pump Torque,SW Pump rpm,SW Pump Torque,Nozzle,Sidebrushes,WideSweepBrush,TempIGBT-Fan,Fan motor temp, Traction rpm, Traction torque,BMS1 Volts, BMS2 Volts\"\n header = header+\"\\n\"\n\n of =\"outFile.csv\"\n outFile = open(of, \"w\")\n outFile.write(header)\n\n for file in os.listdir(location ):\n try:\n if file.endswith(\".csv\") and not(file.startswith(\"outFile\")):\n print(\"...reading {}\".format(file))\n fcsv = csv.reader(open(file, newline=''), delimiter=' ', quotechar='|') \n for row in fcsv:\n line = ', '.join(row)\n if line[:4] == \"Date\":\n d = line[5:13]\n dd = d[6:9]+\"/\"+d[4:6]+\"/\"+d[:4]\n next\n elif line[12] == \"*\" or line[0] == \"*\":\n next\n elif line[0] == \"T\":\n next\n else:\n L = dd + \",\" + line + \"\\n\"\n outFile.write(L)\n except Exception as e:\n raise e\n print(\"No CSV files in here!\")\n\n try: \n print(\"\\nAll files have been merged into: {}\".format(of))\n outFile.close()\n \n except Exception as ee:\n raise ee", "def merge_data(csv_files, delimiter = ',', parse_dates = ['Date']):\n \n for csv in csv_files:\n \n # date formats in source data is slightly different (/2019 vs. /19), \n # TODO: check for better method to catch this error\n \n \n try:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%Y'), delimiter=delimiter)\n \n except:\n df_new = pd.read_csv(csv, parse_dates=parse_dates,\n date_parser=lambda x: pd.datetime.strptime(str(x), '%d/%m/%y'), delimiter=delimiter)\n \n \n\n \n df_new['season'] = df_new.Date.max().year # add season column, defined as the year of the last matchday\n df_new['first_match_day'] = False \n df_new.loc[0:9, 'first_match_day'] = True # declare first 10 games as first match day\n df_new['matchDay'] = 0\n \n try:\n df = df.append(df_new,sort=False)\n except:\n df = df_new\n \n return df", "def data_merge(path, dataset_name=\"processed_data\"):\n files = glob.glob(path+\"**//\"+dataset_name+\".json\")\n logger.info(\"Found {} files under the path {}\".format(len(files),path))\n final_data = []\n\n for file in files:\n assert dataset_name in file\n data = json.load(open(file,\"r\",encoding=\"utf-8\"))\n final_data += data\n\n data_analysis(final_data)\n final_data = json.dumps(final_data,indent=4)\n new_file = open(path + \"//merged_data.json\", \"w+\", encoding=\"UTF-8\")\n new_file.writelines(final_data)", "def merge_files(locale, fail_if_missing=True):\r\n for target, sources in CONFIGURATION.generate_merge.items():\r\n merge(locale, target, sources, fail_if_missing)", "def concat_vsource_sink_csv(csv_fn1,csv_fn2,merged_source_sink_in,\n csv_type,csv_merged,freq='infer',how='left'):\n # merged_source_sink_in: the merged source_sink.in or source_sink.yaml file \n # where the data sources are from csv_fn1, csv_fn2. 
\n if merged_source_sink_in.endswith('yaml'):\n df_sources,df_sinks = read_source_sink_yaml(merged_source_sink_in)\n elif merged_source_sink_in.endswith('in'):\n df_sources,df_sinks = read_source_sink_in(merged_source_sink_in)\n else:\n raise NotImplementedError(\n 'merged_source_sink_in can either be .yaml or .in file')\n if csv_type == 'sources':\n sites = df_sources.index\n elif csv_type == 'sink':\n sites = df_sinks.index\n else:\n raise NotImplementedError('csv_type can either be sources or sinks')\n th1 = read_source_sink_csv(csv_fn1)\n th2 = read_source_sink_csv(csv_fn2)\n if freq=='infer':\n if th1.index.freq!=th2.index.freq:\n print(\"th1 and th2 has different frequency\")\n else:\n th1 = th1.asfreq(freq)\n th2 = th2.asfreq(freq)\n th_merged = th1.join(th2,how=how,rsuffix='r').drop(columns=['datetimer'])\n th_merged = th_merged.fillna(-9999.0)\n cols = np.append(['datetime'],sites)\n th_merged = th_merged[cols] #rearrange the array to have the same order as defined in merged_source_sink_in\n th_merged['datetime'] = np.datetime_as_string(th_merged.index.values,'h')\n write_source_sink_csv(th_merged,csv_merged)", "def _merge_files(files: List[str], output: str, delete: bool = True) -> None:\r\n\r\n if not files:\r\n return\r\n\r\n first = True\r\n\r\n ## Open the single concatenated output file\r\n with open(output, 'w') as outfl:\r\n\r\n ## Loop through input files...\r\n for fpath in files:\r\n\r\n ## Read each input file and format line x line\r\n with open(fpath, 'r') as infl:\r\n\r\n if not first:\r\n ## Skip the header\r\n next(infl)\r\n else:\r\n first = False\r\n\r\n outfl.write(infl.read())\r\n\r\n ## Remove the file once we're done\r\n if delete:\r\n Path(fpath).unlink()", "def get_train_csv(self):\n try:\n self.train_article = pd.read_csv(constants.DATA_DIR / 'knn_article_tags.csv')\n except FileNotFoundError:\n train = pd.Series([])\n for csv_file in os.listdir(constants.CLEAN_DIR):\n if csv_file in self.article_feat_csvs:\n df = pd.read_csv(constants.CLEAN_DIR / csv_file)\n feat = csv_file[8:-4]\n g = df.dropna(axis=0).groupby(\"id\")[self.tag_ref[feat]]\n if train.empty:\n train = g.apply(lambda x: list(x.astype(str).str.lower()))\n else:\n g = g.apply(lambda x: list(x.astype(str).str.lower()))\n train = train.combine(g, lambda x1, x2: list(set(x1+x2)), fill_value=[])\n\n train = pd.DataFrame({'id':train.index, 'tags':train.values})\n train.to_csv(constants.DATA_DIR / 'knn_article_tags.csv', header=True)\n self.train_article = train\n\n try:\n self.train_image = pd.read_csv(constants.DATA_DIR / 'knn_image_tags.csv')\n except FileNotFoundError:\n train = pd.Series([])\n for csv_file in os.listdir(constants.CLEAN_DIR):\n if csv_file in self.image_feat_csvs:\n df = pd.read_csv(constants.CLEAN_DIR / csv_file)\n feat = csv_file[6:-4]\n g = df.dropna(axis=0).groupby(\"id\")[self.tag_ref[feat]]\n if train.empty:\n train = g.apply(lambda x: list(x.astype(str).str.lower()))\n else:\n g = g.apply(lambda x: list(x.astype(str).str.lower()))\n train = train.combine(g, lambda x1, x2: list(set(x1+x2)), fill_value=[])\n\n train = pd.DataFrame({'id':train.index, 'tags':train.values})\n train.to_csv(constants.DATA_DIR / 'knn_image_tags.csv', header=True)\n self.train_image = train", "def annotation_csv_consolidator(filenames, sessions=[], subjects=[], sensors=[]):\n \n single_sess_annotats = []\n c = 0\n for filename in filenames:\n single_sess_annotat = annotation_csv_importer(filename)\n if np.iterable(sessions) and len(sessions) == len(filenames):\n single_sess_index = 
[sessions[c],]*single_sess_annotat.shape[0]\n single_sess_annotat['session'] = single_sess_index\n if np.iterable(subjects) and len(subjects) == len(filenames):\n single_subj_index = [subjects[c],]*single_sess_annotat.shape[0]\n single_sess_annotat['subject'] = single_subj_index\n if np.iterable(sensors) and len(sensors) == len(filenames):\n single_sensor_index = [sensors[c],]*single_sess_annotat.shape[0]\n single_sess_annotat['sensor'] = single_sensor_index\n c += 1\n single_sess_annotats.append(single_sess_annotat)\n consolidate_annotation_data = pd.concat(single_sess_annotats)\n # consolidate_annotation_data = consolidate_annotation_data.reset_index(drop=False)\n # consolidate_annotation_data = consolidate_annotation_data.rename(columns={\"index\":\"index per sensor\"})\n return consolidate_annotation_data", "def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")", "def main(argv):\n\n \n\n if validate_argv(argv) is False:\n print \"Usage: mergeFiles.py <search_term>\"\n sys.exit()\n\n input_directory_name = 'data_raw'\n search_term = argv[0]\n output_file_name = search_term + '_merged.tsv'\n output_directory_name = 'merged'\n\n\n output_path = fp.set_output_file_path(output_file_name, output_directory_name) \n output = open(output_path, 'a')\n for h1 in range(3):\n for h2 in range(10):\n for m1 in range(6):\n for m2 in range(10):\n file_name = search_term + '_' + str(h1) + str(h2) + str(m1) + str(m2) + '.tsv'\n file_path = fp.get_file_path(file_name, input_directory_name)\n if fp.filename_exists(file_path):\n file = open(file_path, 'r')\n file.next()\n for line in file:\n output.write(line)\n file.close()\n output.close()", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n 
os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def merge_breached():\n # read breach and CMS data\n breachdf = pd.read_csv(updated_breach_file_name, encoding='latin1')\n breachdf.rename(columns=lambda x: x.strip(), inplace=True)\n print(breachdf.isnull().sum().sum(), \"null columns\")\n\n df = pd.read_csv(CMS_file_name, encoding='latin1')\n df.rename(columns=lambda x: x.strip(), inplace=True)\n\n print(\"dataframes read\")\n\n # merge data\n new_df = df.merge(breachdf, left_on='FAC_NAME', right_on='FAC_NAME', how='outer')\n print(\"merged\", new_df)\n\n new_df.to_csv(merged_file_name, index=False)\n print(\"Written to\", merged_file_name)", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def combine_syllable_csv():\n reading_csv = pd.read_csv(sys.argv[1])\n 
memory_csv = pd.read_csv(sys.argv[2])\n cookie_csv = pd.read_csv(sys.argv[3])\n merged = reading_csv.merge(memory_csv, on=TRANSCRIPT_ID)\n merged = merged.merge(cookie_csv, on=TRANSCRIPT_ID)\n merged.to_csv('jan27_merged_syllabus.csv', sep=',', header=True, index=False)", "def merge_duplicate_psm_rows(\n csv_file_path=None,\n psm_counter=None,\n psm_defining_colnames=None,\n psm_colnames_to_merge_multiple_values={},\n joinchar=\"<|>\",\n overwrite_file=True,\n):\n rows_to_merge_dict = defaultdict(list)\n\n if overwrite_file:\n tmp_file = csv_file_path + \".tmp\"\n os.rename(csv_file_path, tmp_file)\n out_file = csv_file_path\n else:\n tmp_file = csv_file_path\n out_file = csv_file_path.strip(\".csv\") + \"_merged_duplicates.csv\"\n UNode.print_info(\"Merging rows of the same PSM...\", caller=\"postflight\")\n # print('Merging rows of the same PSM...')\n csv_kwargs = {}\n if sys.platform == \"win32\":\n csv_kwargs[\"lineterminator\"] = \"\\n\"\n else:\n csv_kwargs[\"lineterminator\"] = \"\\r\\n\"\n with open(tmp_file, \"r\") as tmp, open(out_file, \"w\", newline=\"\") as out:\n tmp_reader = csv.DictReader(tmp)\n writer = csv.DictWriter(out, fieldnames=tmp_reader.fieldnames, **csv_kwargs)\n writer.writeheader()\n for row in tmp_reader:\n psm = tuple([row[x] for x in psm_defining_colnames if x in row.keys()])\n # each unique combination of these should only have ONE row!\n # i.e. combination of seq+spec+score\n if psm_counter[psm] == 1:\n # no duplicate = no problem, we can just write the row again\n writer.writerow(row)\n elif psm_counter[psm] > 1:\n # we have to collect all rows of this psm, and merge + write\n # them later!\n rows_to_merge_dict[psm].append(row)\n else:\n raise Exception(\"This should never happen.\")\n # finished parsing the old unmerged unified csv\n for rows_to_merge in rows_to_merge_dict.values():\n writer.writerow(\n merge_rowdicts(\n rows_to_merge,\n psm_colnames_to_merge_multiple_values,\n joinchar=joinchar,\n )\n )\n # remove the old unified csv that contains duplicate rows\n if overwrite_file:\n os.remove(tmp_file)\n UNode.print_info(\"Done.\", caller=\"postflight\")\n return out_file", "def merge_training_data():\r\n line_data = []\r\n with open(\"Training_Data.txt\", \"w\") as file:\r\n for training_data_language in [\"Training_EN.txt\", \"Training_NL.txt\"]:\r\n with open(training_data_language, \"r\") as language_file:\r\n read_text = language_file.readlines()\r\n for line in read_text:\r\n file.write(line)\r\n\r\n # Ensuring no trailing spaces etc\r\n with open(\"Training_Data.txt\", \"r\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n line = line.strip().split(\", \")\r\n line_data.append(line)\r\n\r\n with open('Training_Data_CSV.csv', 'w') as Training_Data_CSV:\r\n writer = csv.writer(Training_Data_CSV)\r\n writer.writerows([['COLUMN1', 'COLUMN2', 'COLUMN3', 'COLUMN4', 'COLUMN5', 'COLUMN6', 'COLUMN7', 'COLUMN8',\r\n 'COLUMN9', 'COLUMN10', 'Label']])\r\n writer.writerows(line_data)\r\n\r\n print(\"Training Data merged and CSV created\")", "def combine_files(file_name):\n\n\tif file_name == \"train\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Train/all_level1_train.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Train/all_level1_train.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Train/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop([\"Response\", \"Id\"],1)\n\n\t\t# Concat\n\t\tdf_out = 
pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Response\")\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col + [\"Response\"]\n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Train/all_level1_train.csv\", index = False)\n\n\telif file_name == \"test\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Test/all_level1_test.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Test/all_level1_test.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Test/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop(\"Id\",1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col \n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Test/all_level1_test.csv\", index = False)", "def merge_cooccur(args):\n if not args.quiet:\n logger.setLevel(logging.INFO)\n\n merged = dict.fromkeys(['mat', 'tokenizer', 'window_size', 'uniform_count'])\n with tqdm(total=len(args.cooccurfiles), ncols=80, disable=args.quiet) as prog:\n for file in args.cooccurfiles:\n # load the data\n corpus = load_corpus(file)\n\n if merged['tokenizer'] is None:\n merged['tokenizer'] = corpus._tokenizer.to_str()\n\n if merged['window_size'] is None:\n merged['window_size'] = corpus.window_size\n\n if merged['uniform_count'] is None:\n merged['uniform_count'] = corpus.uniform_count\n\n mat = corpus.mat.astype('float32')\n if args.symmetrization:\n mat = (mat + mat.T.tocoo()).tocoo()\n\n if merged['mat'] is None:\n merged['mat'] = mat\n else:\n merged['mat'] += mat\n\n prog.update()\n merged['mat'] = merged['mat'].tocoo()\n\n # save output\n logger.info('Saving to disk...')\n out_fn = join(args.path, args.out)\n with open(out_fn, 'wb') as fp:\n pkl.dump(\n {\n 'mat': {\n 'row': merged['mat'].row,\n 'col': merged['mat'].col,\n 'counts': merged['mat'].data\n },\n 'tokenizer': merged['tokenizer'],\n 'uniform_count': merged['uniform_count'],\n 'window_size': merged['window_size']\n },\n fp\n )", "def import_directory_csv(d_in, d_out, target_column, merge_columns):\n\n INPUT_FILES = grab_files(\"*.csv\", d_in)\n\n if not INPUT_FILES:\n logger.warning(\"No matching CSV files found, exiting\")\n exit(2)\n\n for f_csv in INPUT_FILES:\n f_csv_out = os.path.join(d_out, os.path.basename(f_csv))\n vals = (f_csv, f_csv_out, target_column, merge_columns)\n import_csv(vals)", "def combine_data(data_file_1,data_file_2,lookup,outfile):\n # Read in tabbed data\n print(\"Reading in data from %s\" % data_file_1)\n data1 = IndexedFile(data_file_1,first_line_is_header=True)\n print(\"Reading in data from %s\" % data_file_2)\n data2 = IndexedFile(data_file_2,first_line_is_header=True)\n\n # Open output file\n fp = io.open(outfile,'wt')\n\n # Call main function to do the actual work\n combine_data_main(data1,data2,lookup,fp)\n\n # Finished\n fp.close()\n print(\"Output written to '%s'\" % outfile)", "def generate_csv(filename='', datafile='queryData.txt', keywords=mainKeywords):\n\n fulltitle = os.path.join(os.getcwd(), 'results', 'searchWoKResults', filename + 'MaterialsKeySearchFull.csv')\n contitle = os.path.join(os.getcwd(), 'results', 'searchWoKResults', filename + 'MaterialsKeySearchCondensed.csv')\n 
datatitle = os.path.join(os.getcwd(), 'results', 'searchWoKResults', filename + datafile)\n\n with open(fulltitle, 'wt') as csvFull, open(contitle, 'wt') as csvCon, open(datatitle, 'rt') as data:\n fwriter = csv.writer(csvFull, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n cwriter = csv.writer(csvCon, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n querydata = json.load(data)\n\n conheader = ['Material', 'Publications', 'Crystal System', 'Space Group', 'Calculated Band Gap']\n for n in keywords:\n conheader.append(n)\n cwriter.writerow(conheader)\n\n linenum = 0\n\n print('GETTING KEYWORD LIST:')\n print('Searching for ' + str(keywords) + '\\n')\n\n for searchData in querydata:\n print('Searching through ' + searchData[0]['material'] + ' data')\n\n keylist = searchWoKTools.getkeylist(searchData, keywords)\n\n print('Generating clouds')\n wc = searchWoKTools.generateabstractwc(searchData)\n imgpath = os.path.join(os.getcwd(), 'results', 'searchWoKResults', filename, searchData[0]['material'] + '.png')\n wc.to_file(imgpath)\n\n print('Writing CSV')\n fwriter.writerow([searchData[0]['material'],\n str(searchData[0]['numResults']) + ' publications',\n searchData[0]['crystalsystem'],\n searchData[0]['spacegroup'] + ' spacegroup',\n searchData[0]['bandgap'] + ' band gap',\n searchData[0]['searchURL'],\n '=HYPERLINK(\"' + imgpath + '\",\"Word Cloud\")'])\n linenum += 1\n\n conline = [\n '=HYPERLINK(\"[' + fulltitle + ']' + filename + 'MaterialsKeySearchFull' + '!A' + str(linenum) + '\",\"' +\n searchData[0]['material'] + '\")',\n\n str(searchData[0]['numResults']),\n searchData[0]['crystalsystem'],\n searchData[0]['spacegroup'],\n searchData[0]['bandgap']]\n\n fwriter.writerow([])\n linenum += 1\n\n for key in keylist.keys():\n keyrow = []\n conkeynum = 0\n for n in range(len(keylist[key])):\n if keylist[key][n] != 0:\n cellstring = '=HYPERLINK(\"' + searchData[1][n]['DOIlink'] + '\",\"' + key + '(' + str(\n keylist[key][n]) + ')\")'\n keyrow.append(cellstring)\n conkeynum += 1\n if keyrow:\n fwriter.writerow(keyrow)\n linenum += 1\n if conkeynum != 0:\n constring = '=HYPERLINK(\"[' + fulltitle + ']' + filename + 'MaterialsKeySearchFull' + '!A' + str(\n linenum) + '\",\"' + str(conkeynum) + '\")'\n conline.append(constring)\n else:\n conline.append('')\n\n cwriter.writerow(conline)\n\n fwriter.writerow([])\n fwriter.writerow([])\n linenum += 2\n\n return", "def merge():\n result = []\n for f in glob.glob(f\"{DATA_DIR}/COP*.json\"):\n with open(f, \"r\") as infile:\n result.append(json.load(infile))\n\n with open(f\"{DATA_DIR}/corpus.json\", \"w\", encoding=\"utf-8\") as outfile:\n json.dump(result, outfile)", "def import_csv(item):\n (f_csv, f_csv_out, target_column, merge_columns) = item\n has_checked_keys = False\n\n if not merge_columns:\n raise ValueError(\"merge_columns must not be empty\")\n\n with open(f_csv_out, \"w\") as FOUT:\n CSV_HANDLE = None\n total_rows = 0\n\n for row in csv_iterator(f_csv):\n\n output = {\"_ref\": next(_ref_counter)}\n\n if not has_checked_keys:\n for key in merge_columns:\n if key not in row.keys():\n msg = \"Column **{}** not in csv file {}\"\n raise KeyError(msg.format(key, f_csv))\n has_checked_keys = True\n\n if target_column in row.keys():\n msg = \"Generated column **{}** already in csv file {}\"\n raise KeyError(msg.format(target_column, f_csv))\n\n text = []\n for key in merge_columns:\n val = row[key].strip()\n if not val:\n continue\n if val[-1] not in \".?!,\":\n val += \".\"\n text.append(val)\n\n output[target_column] = 
\"\\n\".join(text).strip()\n\n if CSV_HANDLE is None:\n CSV_HANDLE = csv.DictWriter(FOUT, sorted(output.keys()))\n CSV_HANDLE.writeheader()\n\n CSV_HANDLE.writerow(output)\n total_rows += 1\n\n logger.info(\"Imported {}, {} entries\".format(f_csv, total_rows))", "def merge_files(filename_list, merged_file, encode):\n lines_counter = list()\n for file_name in filename_list:\n lines_counter.append(count_lines(file_name))\n lines_counter.sort(key=lambda item: item[-1])\n with open(merged_file, 'w', encoding=encode) as file:\n for doc in lines_counter:\n file.write(f'{doc[0]}\\n')\n file.write(f'{doc[1]}\\n')\n text = get_text(doc[0])\n file.write(f'{text}\\n\\n')", "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. 
At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n 
#dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if ( k == 
'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0", "def step_parallel(in_csv_filename, terrestrial_data, marine_data, ancillary_path,\n out_csv_filename, from_gbif=True):\n csv_filename_pairs, header = get_chunk_files(\n in_csv_filename, out_csv_filename=out_csv_filename)\n\n# in_csv_fn, out_csv_fn = csv_filename_pairs[0]\n# intersect_csv_and_shapefiles(in_csv_fn, terrestrial_data,\n# marine_data, ancillary_path, out_csv_fn, False)\n\n with ProcessPoolExecutor() as executor:\n for in_csv_fn, out_csv_fn in csv_filename_pairs:\n executor.submit(\n intersect_csv_and_shapefiles, in_csv_fn, terrestrial_data,\n marine_data, ancillary_path, out_csv_fn, from_gbif)\n\n try:\n outf = open(out_csv_filename, 'w', encoding='utf-8')\n outf.write('{}'.format(header))\n smfile_linecount = 0\n for _, small_csv_fn in csv_filename_pairs:\n curr_linecount = get_line_count(small_csv_fn) - 1\n print('Appending {} records from {}'.format(\n curr_linecount, small_csv_fn))\n # Do not count header\n smfile_linecount += (curr_linecount)\n lineno = 0\n try:\n for line in open(small_csv_fn, 'r', encoding='utf-8'):\n # Skip header in each file\n if lineno == 0:\n pass\n else:\n outf.write('{}'.format(line))\n lineno += 1\n except Exception as inner_err:\n print('Failed to write {} to merged file; {}'.format(small_csv_fn, inner_err))\n except Exception as outer_err:\n print('Failed to write to {}; {}'.format(out_csv_filename, outer_err))\n finally:\n outf.close()\n\n lgfile_linecount = get_line_count(out_csv_filename) - 1\n print('Total {} of {} records written to {}'.format(\n lgfile_linecount, smfile_linecount, out_csv_filename))", "def load_data(messages_filepath, categories_filepath): \n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n return pd.merge(messages, categories, how='outer', left_on='id', right_on='id')", "def consolidate_keywords(self, mappings: dict) -> None:\n\n for file_name in self._meta_data_dict.keys():\n file_mappings = self._meta_data_dict[file_name]\n new_mappings = dict()\n\n for key in file_mappings:\n parent_key = mappings.get(key)\n new_mappings[parent_key] = file_mappings[key]\n\n self._meta_data_dict[file_name] = new_mappings", "def merge_text_files(merged_filename, directory, *args, **kwargs):\n try:\n files_lst = os.listdir(directory)\n total = len(files_lst)\n 
print '\\nFound {} files to be merged...'.format(total)\n with open(merged_filename, 'w') as f:\n for i, l in enumerate(files_lst):\n with open(os.path.join(directory, l)) as log:\n for n, line in enumerate(log):\n if n == 0:\n if kwargs.get('first_line', False):\n if line.find(kwargs['first_line']) == -1:\n continue\n f.write(line)\n print '\\nFile {} of {} was merged successfully.'.format(i + 1, total)\n except Exception as e:\n print '\\nError merging logs {}'.format(e)", "def combineFilesWithParas(dir_path, filename, targetDir=None, connector=\"=\"):\n filenameL = listR.toList(filename)\n\n for filename in filenameL: # do the following for each file:\n # get list of common parameters:\n allParas = []\n for aDir in dirR.listNestedDirContainsFiles(dir_path, filename):\n allParas.append(listR.readCSESD(os.path.join(aDir, filename)).keys())\n allParas = listR.removeDuplicates(listR.intersect(allParas))\n\n # generate modified file name:\n if targetDir == None: targetDir=os.getcwd()\n new_filename = os.path.join(targetDir,os.path.splitext(filename)[0]+\"(\"+\",\".join(listR.stringizeL(allParas))+\")\"+os.path.splitext(filename)[1])\n\n # generate the combined data file:\n outFile = open(new_filename, \"w\")\n for aDir in dirR.listNestedDirContainsFiles(dir_path, filename):\n to_read = os.path.join(aDir, filename) # target file: path+name\n parasD = listR.readCSESD(to_read) # get a dictionary of parameter values\n para_str = \" \".join(listR.stringizeL(listR.getValueListFromDict(allParas, parasD))) # get the associated parameter list in string format\n inFile = open(to_read, \"r\")\n buffer = inFile.readlines() # read target file\n for aLine in buffer: # concarnate target file with parameter list:\n outFile.write(para_str+\" \"+aLine)\n inFile.close()\n outFile.close()", "def loadFiles(analyzer,totalFiles):\n for filename in totalFiles:\n if filename.endswith('.csv'):\n print('Cargando archivo: ' + filename)\n loadTrips(analyzer, filename)\n print(\"Cargando información extra...\")\n model.findPopulars(analyzer)\n model.findPopularsAdd(analyzer)\n return analyzer", "def handlehtmlsearch_csv(querystring, keywordstring, searchlimit, searchname, cache, smartconstrain):\n fulltitle = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname + 'Full.csv')\n contitle = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname + 'Condensed.csv')\n\n if wcexists:\n if not os.path.exists(os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname)):\n os.makedirs(os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname))\n\n with open(fulltitle, 'wt') as csvFull, open(contitle, 'wt') as csvCon:\n fwriter = csv.writer(csvFull, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n cwriter = csv.writer(csvCon, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n keywords, mpsearch, wokresults, keyresults = handlehtmlsearch_wok(querystring, keywordstring, searchlimit,\n cache, smartconstrain)\n\n conheader = ['Material', 'Publications', 'Space Group', 'Calculated Band Gap']\n for n in keywords:\n conheader.append(n)\n cwriter.writerow(conheader)\n\n linenum = 0\n\n for i in range(len(wokresults)):\n searchdata = wokresults[i]\n\n if wcexists:\n wc = searchWoKTools.generateabstractwc(searchdata)\n imgpath = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname,\n searchdata[0]['pretty_formula'] + '.png')\n wc.to_file(imgpath)\n\n fwriter.writerow([searchdata[0]['pretty_formula'],\n str(searchdata[0]['numResults']) 
+ ' publications',\n str(searchdata[0]['spacegroup']) + ' spacegroup',\n str(searchdata[0]['band_gap']) + ' band gap',\n searchdata[0]['searchURL'],\n '=HYPERLINK(\"' + imgpath + '\",\"Word Cloud\")'])\n linenum += 1\n\n conline = [\n '=HYPERLINK(\"[' + fulltitle + ']' + searchname + 'Full' + '!A' + str(linenum) + '\",\"' +\n searchdata[0]['pretty_formula'] + '\")',\n\n str(searchdata[0]['numResults']),\n str(searchdata[0]['spacegroup']),\n str(searchdata[0]['band_gap'])]\n\n fwriter.writerow([])\n linenum += 1\n\n for key in keyresults[i].keys():\n keyrow = []\n conkeynum = 0\n for n in range(len(keyresults[i][key])):\n paper = keyresults[i][key][n]\n if paper != 0:\n cellstring = '=HYPERLINK(\"' + searchdata[1][n]['DOIlink'] + '\",\"' + key + '(' + str(\n paper) + ')\")'\n keyrow.append(cellstring)\n conkeynum += 1\n if keyrow:\n fwriter.writerow(keyrow)\n linenum += 1\n if conkeynum != 0:\n constring = '=HYPERLINK(\"[' + fulltitle + ']' + searchname + 'Full' + '!A' + str(\n linenum) + '\",\"' + str(conkeynum) + '\")'\n conline.append(constring)\n else:\n conline.append('')\n\n cwriter.writerow(conline)\n\n fwriter.writerow([])\n fwriter.writerow([])\n linenum += 2\n\n return json.dumps([os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname)])", "def combine_source_target_files(source_dir, target_dir, out_dir, file_matcher, original_ids=None):\n source_files = get_all_files(source_dir, file_matcher)\n target_files = get_all_files(target_dir, file_matcher)\n target_file_bases = np.array(list(map(lambda x: os.path.basename(x).lower(), target_files)))\n id_var = 'id'\n dedup_vars = [id_var]\n all_txt_vars = ['text', 'user_description', 'user_location']\n RETURN_CHAR_MATCHER = re.compile('[\\n\\r\\t]')\n if(not os.path.exists(out_dir)):\n os.mkdir(out_dir)\n for source_file in source_files:\n # find matching target file\n source_file_base = os.path.basename(source_file).lower()\n target_file_base_idx = np.where(target_file_bases == source_file_base)[0]\n combined_data_file_name = os.path.join(out_dir, source_file_base)\n# if(not os.path.exists(combined_data_file_name)):\n # if target file exists, then combine source/target\n if(len(target_file_base_idx) > 0):\n target_file_base_idx = target_file_base_idx[0]\n target_file = target_files[target_file_base_idx]\n try:\n source_data = pd.read_csv(source_file, sep='\\t', compression='gzip')\n if('Unnamed: 0' in source_data.columns):\n source_data.drop('Unnamed: 0', axis=1, inplace=True)\n # fix column name mismatches\n source_data.rename(columns={'user_screen_name' : 'screen_name', 'user_id' : 'author_id'}, inplace=True)\n target_data = pd.read_csv(target_file, sep='\\t', compression='gzip')\n # combine!\n logging.info(f'combining files for {source_file_base}')\n combined_data = pd.concat([source_data, target_data], axis=0)\n # deduplicate!\n combined_data.drop_duplicates(dedup_vars, inplace=True)\n # clean\n combined_data.fillna('', inplace=True)\n # filter original IDs\n if(original_ids is not None):\n combined_data = combined_data[~combined_data.loc[:, id_var].isin(original_ids)]\n # remove return characters\n for txt_var_i in all_txt_vars:\n combined_data = combined_data.assign(**{\n txt_var_i : combined_data.loc[:, txt_var_i].apply(lambda x: RETURN_CHAR_MATCHER.sub('', str(x)))\n })\n logging.info('%d/%d source/target'%(source_data.shape[0], target_data.shape[0]))\n logging.info('combined data has %d/%d data'%(combined_data.shape[0], source_data.shape[0]+target_data.shape[0]))\n # write to file\n 
combined_data.to_csv(combined_data_file_name, sep='\\t', compression='gzip', index=False)\n except Exception as e:\n logging.info(f'going to skip file {source_file_base} because error {e}')\n # if target file does not exist, copy the source data\n else:\n logging.info(f'copying {source_file} without combining')\n source_data = pd.read_csv(source_file, sep='\\t', compression='gzip')\n if('Unnamed: 0' in source_data.columns):\n source_data.drop('Unnamed: 0', axis=1, inplace=True)\n # fix column name mismatches\n source_data.rename(columns={'user_screen_name' : 'screen_name', 'user_id' : 'author_id'}, inplace=True)\n source_data.to_csv(combined_data_file_name, sep='\\t', compression='gzip', index=False)", "def intersect_csv_and_shapefiles(in_csv_filename, geodata1, geodata2,\n ancillary_path, out_csv_filename, from_gbif):\n pth, basefname = os.path.split(out_csv_filename)\n logbasename, _ = os.path.splitext(basefname)\n logfname = os.path.join(pth, '{}.log'.format(logbasename))\n logger = get_logger(logbasename, logfname)\n bf = BisonFiller(log=logger)\n # Pass 4 of CSV transform, final step, point-in-polygon intersection\n bf.update_point_in_polygons(\n geodata1, geodata2, ancillary_path, in_csv_filename, out_csv_filename,\n from_gbif=from_gbif)\n # Do intersection here\n sleep(randint(0, 10))\n print(' - {}'.format(out_csv_filename))", "def join_dataframe(file_name_1, file_name_2):\n df1 = pd.read_csv(file_name_1, sep=\"\\t\", usecols=[\"userID\", \"movieID\", \"rating\"], nrows=1000)\n df2 = pd.read_csv(file_name_2, sep=\"\\t\", usecols=[\"movieID\", \"genre\"])\n merged = df1.merge(df2, on='movieID')\n # #merged.to_csv(\"merged.csv\", sep='\\t')\n df2[\"dummy_column\"] = 1\n df_pivoted = df2.pivot_table(index=\"movieID\", columns=\"genre\", values=\"dummy_column\") #The levels in the pivot table will be stored in MultiIndex objects\n # (hierarchical indexes) on the index and columns of the result DataFrame.\n df_pivoted = df_pivoted.fillna(0) # change Nan to 0\n\n\n df = pd.merge(df1, df_pivoted, on=[\"movieID\"])\n genres = df.columns[3:-1].values\n\n #df.columns = [\"genres_\" + name if name not in df.columns[:3] else name for name in df.columns]\n #Genres_with = df.columns[3:-1].values\n users_id = df['userID'].unique()\n\n return df,merged, genres, users_id", "def load_data(messages_filepath, categories_filepath):\n\n message_df = pd.read_csv(messages_filepath)\n categories_df = pd.read_csv(categories_filepath)\n\n merged = message_df.merge(categories_df, how='inner', on='id')\n\n return merged", "def concat_file(filename):\n csv_paths = read_csv(filename)\n\n data_len = 0\n df_total = None\n for csv_name, csv_path in tqdm(csv_paths):\n print(csv_name)\n df = dt.fread(csv_path).to_pandas()\n data_len += df.shape[0]\n\n process_df = filter_law(df)\n\n if df_total is None:\n df_total = process_df.copy()\n else:\n df_total = pd.concat([df_total, process_df], ignore_index=True)\n\n print(\"Total data count: {}\".format(data_len))\n df_total.to_csv('eda_concat.csv')", "def merge_walkupseq_files(latest_tsca_id):\n paths = glob.glob('walkupseq_files/*sample_info*')\n\n dfs = []\n for f in paths:\n tmp = pd.read_table(f, encoding='latin1')\n dfs.append(tmp)\n\n df = pd.concat(dfs, axis=0)\n df.to_csv('walkupseq_files/walkupseq_all_combined_%s.txt'%latest_tsca_id, sep=\"\\t\", index=None)\n return df", "def load_data(messages_filepath, categories_filepath):\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n merged_df = messages.merge(right=categories, 
on=\"id\", how=\"inner\")\n return merged_df", "def execute_processor(self):\n \n # pull in the parameter that has the file names we will process\n filename1 = self.param_dict['file1']\n filename2 = self.param_dict['file2']\n \n ''' these next 2 lines are the ones that I added to create a dummy row '''\n right_dummy = self.create_dummy_row( self.param_dict['dummy_rec_right'])\n left_dummy = self.create_dummy_row( self.param_dict['dummy_rec_left'])\n \n \n \n\n self.open_files(os.path.join(self.entry.working_directory,filename1), os.path.join(self.entry.working_directory,filename2))\n self.process_params()\n key_dict = self.create_key_match()\n file1_rec = self.read_file1(first=True)\n file2_rec = self.read_file2(first=True)\n \n file2_used = False\n \n # call the convenience method to setup the temp_csv file. This will also write the header row by default\n self.setup_csv_temp_writer(self.get_temp_csv_name(), self.get_header(self.file1_reader.fieldnames,self.file2_reader.fieldnames),preserve_order=True)\n \n while file1_rec:\n combined = {k:v for k,v in file1_rec.items()}\n if file2_rec and self.get_key(file2_rec,self.file2_key) == self.get_key(file1_rec,self.file1_key):\n # merge these two bad boys\n combined.update(self.get_values(file2_rec))\n file2_used = True\n ### WRITE ###\n self.write_temp_rec(combined)\n file1_rec = self.read_file1()\n elif file2_rec and self.get_key(file1_rec,self.file1_key) > self.get_key(file2_rec,self.file2_key):\n if not file2_used and left_dummy:\n ''' left side dummy \n now use the already created dummy_row to updated the dictionary '''\n left_dummy.update(self.get_values(file2_rec))\n key_fields = {key_dict[k]:file2_rec[k] for k in self.file2_key.split(\",\")}\n left_dummy.update(key_fields)\n self.write_temp_rec(left_dummy)\n left_dummy = self.create_dummy_row( self.param_dict['dummy_rec_left'])\n \n \n file2_rec = self.read_file2()\n file2_used = False\n \n elif not file2_rec or self.get_key(file1_rec,self.file1_key) < self.get_key(file2_rec,self.file2_key):\n ### WRITE REC WITH NO MATCH ###\n if self.keep_nomatch:\n ''' right side dummy\n now use the already created dummy_row to updated the dictionary '''\n if right_dummy:\n combined.update(self.get_values(right_dummy))\n self.write_temp_rec(combined)\n file1_rec = self.read_file1()\n else:\n raise Exception\n self.close_temp_csv()\n return 0", "def perform_combination(sonar_model, input_paths, output_path, engine):\n # TODO: there should be compression option for the combined file too...\n\n def coerce_type(ds, group):\n if group == 'Beam':\n if sonar_model == 'EK80':\n ds['transceiver_software_version'] = ds['transceiver_software_version'].astype('<U10')\n ds['channel_id'] = ds['channel_id'].astype('<U50')\n elif sonar_model == 'EK60':\n ds['gpt_software_version'] = ds['gpt_software_version'].astype('<U10')\n ds['channel_id'] = ds['channel_id'].astype('<U50')\n\n print(f\"{dt.now().strftime('%H:%M:%S')} combining files...\")\n\n # TODO: add in the documentation that the Top-level and Sonar groups are\n # combined by taking values (attributes) from the first file\n # Combine Top-level group, use values from the first file\n with xr.open_dataset(input_paths[0], engine=engine) as ds_top:\n io.save_file(ds_top, path=output_path, mode='w', engine=engine)\n\n # Combine Sonar group, use values from the first file\n with xr.open_dataset(input_paths[0], group='Sonar', engine=engine) as ds_sonar:\n io.save_file(ds_sonar, path=output_path, mode='a', engine=engine, group='Sonar')\n\n # Combine Provenance group,\n 
ds_prov = assemble_combined_provenance(input_paths)\n io.save_file(ds_prov, path=output_path, mode='a', engine=engine, group='Provenance')\n\n # TODO: Put the following in docs:\n # Right now we follow xr.combine_by_coords default to only combine files\n # with nicely monotonically varying ping_time/location_time/mru_time.\n # However we know there are lots of problems with pings going backward in time for EK60/EK80 files,\n # and we will need to clean up data before calling merge.\n # Combine Beam\n with xr.open_mfdataset(input_paths, group='Beam',\n concat_dim='ping_time', data_vars='minimal', engine=engine) as ds_beam:\n coerce_type(ds_beam, 'Beam')\n io.save_file(ds_beam.chunk({'range_bin': DEFAULT_CHUNK_SIZE['range_bin'],\n 'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}), # these chunk sizes are ad-hoc\n path=output_path, mode='a', engine=engine, group='Beam')\n\n # Combine Environment group\n with xr.open_mfdataset(input_paths, group='Environment',\n concat_dim='ping_time', data_vars='minimal', engine=engine) as ds_env:\n io.save_file(ds_env.chunk({'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Environment')\n\n # Combine Platform group\n if sonar_model == 'AZFP':\n with xr.open_mfdataset(input_paths, group='Platform',\n combine='nested', # nested since this is more like merge and no dim to concat\n compat='identical', engine=engine) as ds_plat:\n io.save_file(ds_plat, path=output_path, mode='a', engine=engine, group='Platform')\n elif sonar_model == 'EK60':\n with xr.open_mfdataset(input_paths, group='Platform',\n concat_dim=['location_time', 'ping_time'],\n data_vars='minimal', engine=engine) as ds_plat:\n io.save_file(ds_plat.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time'],\n 'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Platform')\n elif sonar_model in ['EK80', 'EA640']:\n with xr.open_mfdataset(input_paths, group='Platform',\n concat_dim=['location_time', 'mru_time'],\n data_vars='minimal', engine=engine) as ds_plat:\n io.save_file(ds_plat.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time'],\n 'mru_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Platform')\n\n # Combine Platform/NMEA group\n if sonar_model in ['EK60', 'EK80', 'EA640']:\n with xr.open_mfdataset(input_paths, group='Platform/NMEA',\n concat_dim='location_time', data_vars='minimal', engine=engine) as ds_nmea:\n io.save_file(ds_nmea.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time']}).astype('str'),\n path=output_path, mode='a', engine=engine, group='Platform/NMEA')\n\n # Combine Vendor-specific group\n if sonar_model == 'AZFP':\n with xr.open_mfdataset(input_paths, group='Vendor',\n concat_dim=['ping_time', 'frequency'],\n data_vars='minimal', engine=engine) as ds_vend:\n io.save_file(ds_vend, path=output_path, mode='a', engine=engine, group='Vendor')\n else:\n with xr.open_mfdataset(input_paths, group='Vendor',\n combine='nested', # nested since this is more like merge and no dim to concat\n compat='no_conflicts', data_vars='minimal', engine=engine) as ds_vend:\n io.save_file(ds_vend, path=output_path, mode='a', engine=engine, group='Vendor')\n\n # TODO: print out which group combination errors out and raise appropriate error\n\n print(f\"{dt.now().strftime('%H:%M:%S')} all files combined into {output_path}\")", "def combineVul(vulPaths):\n\n print(\"Start combining vulnerability data\")\n\n outDir = os.path.join(DEFAULT_OUTPUT, \"smell&vul\")\n if 
not os.path.exists(outDir):\n os.makedirs(outDir)\n\n for projName, vulPath in vulPaths:\n print(f\"Start combining '{projName}' vulnerability data\")\n smellPath = os.path.join(DEFAULT_OUTPUT, 'smells', projName+'.csv')\n outPath = os.path.join(outDir, projName+'.csv')\n\n if not os.path.isfile(vulPath):\n print(f\"ERROR: Vulnerability data does not exist in path '{vulPath}', skip.\")\n continue\n if not os.path.isfile(smellPath):\n print(f\"ERROR: Vulnerability data does not exist in path '{smellPath}', skip.\")\n continue\n\n with open(outPath, 'w') as out, open(smellPath, 'r') as smellIn, open(vulPath, 'r') as vulIn:\n smellReader = csv.DictReader(smellIn)\n vulReader = csv.DictReader(vulIn)\n\n header = getOrderedHeader(list(vulReader.fieldnames) + list(smellReader.fieldnames))\n\n smellDict = {(x['Name'], x['Version']): x for x in smellReader}\n vulDict = {(x['Name'], x['Version']): x for x in vulReader}\n\n for k, v in vulDict.items():\n if k in smellDict:\n smellDict[k].update(v)\n else:\n print(f\"WARN: {k} pair is not in smell data\")\n\n writer = csv.DictWriter(out, fieldnames=header, delimiter=\",\")\n writer.writeheader()\n writer.writerows(smellDict.values())", "def merge(root, option, values, extension=\"txt\"):\n if option.upper() not in OPTIONS:\n raise ValueError(\"Options is not implemented: {}\".format(option))\n elif option.upper() == \"TOPIC\":\n searchTerms = values\n elif option.upper() == \"YEAR\":\n startYear, endYear = values\n searchTerms = range(startYear, endYear+1)\n else:\n raise ValueError(\"Update merge OPTIONS, option is not included: {}\".format(option))\n\n for searchTerm in searchTerms:\n document = mergeSimilarDocuments(root, searchTerm, extension)\n\n filename = '{}/{}.{}'.format(root + '_merged', str(searchTerm), str(extension))\n with codecs.open(filename, 'w+', encoding=\"utf-8\") as f:\n f.write(document)", "def process_file(input_file, output_good, output_bad):\n\t# Lists containing good and bad rows\n\tlist_good = []\n\tlist_bad = []\n\t# Open csv file \n\twith open(input_file, 'r') as f:\n\t\t# Create reader from csv and store header row\n\t\treader = csv.DictReader(f)\n\t\theader = reader.fieldnames\n\t\t# Store useful keys\n\t\tyear = 'productionStartYear'\n\t\turi = 'URI'\n\t\t# Loop through all rows\n\t\tfor row in reader:\n\t\t\t# Discard rows with a URI not from DBpedia\n\t\t\tif not row[uri].startswith('http://dbpedia.org'):\n\t\t\t\tcontinue\n\t\t\t# Extract year from datetime\n\t\t\tyear_value = row[year][:4]\n\t\t\t# Change row datetime value to its year\n\t\t\trow[year] = year_value\n\t\t\t# Check if year actually contains a year\n\t\t\tif not row[year].isdigit():\n\t\t\t\t# Add to list_bad\n\t\t\t\tlist_bad.append(row)\n\t\t\t\tcontinue\n\t\t\t# Check if year falls within expected range\n\t\t\tif int(row[year]) < 1886 or int(row[year]) > 2014:\n\t\t\t\t# Add list to bad\n\t\t\t\tlist_bad.append(row)\n\t\t\t\tcontinue\n\t\t\t# Row is proper, add to list_good\n\t\t\tlist_good.append(row)\n\t\t\n\t\t# Open good ouput file, write the good rows to it\n\t\twith open(output_good, 'w') as csvfile:\n\t\t\twriter = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n\t\t\twriter.writeheader()\n\t\t\tfor item in list_good:\n\t\t\t\twriter.writerow(item)\n\n\t\t# Open bad ouput file, write the nad rows to it\n\t\twith open(output_bad, 'w') as csvfile:\n\t\t\twriter = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n\t\t\twriter.writeheader()\n\t\t\tfor item in list_bad:\n\t\t\t\twriter.writerow(item)", "def 
parse_csv_files(csv_files, **kwargs):\n\n per_token_savings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n slip_price_diff_splits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n\n for file in csv_files:\n per_file_base_prices = {}\n for _, _, trade_size, token, exchange, exchange_price, _, totle_price, pct_savings, splits, _ in csv_row_gen(file, **kwargs):\n if not per_file_base_prices.get(token): # this assumes prices recorded from lowest to highest for a token\n per_file_base_prices[token] = totle_price # should be same for all aggs, but is slightly different sometimes\n\n slip = (totle_price / per_file_base_prices[token]) - 1.0 # should be 0 for the lowest trade_size\n # i.e. slip = (totle_price - per_file_base_prices[token]) / per_file_base_prices[token]\n\n slip = 0.0 if slip < 0.0 and slip > -0.00001 else slip # get rid of -0.0000\n price_diff = (totle_price - exchange_price) / exchange_price\n\n slip_price_diff_splits[token][trade_size][exchange].append((slip, price_diff, splits))\n per_token_savings[token][trade_size][exchange].append(pct_savings)\n\n\n return per_token_savings, slip_price_diff_splits", "def csvs_scattered_to_grouped(path_dir, inlist, outlist, gcols,\n sort=1, scols=None, catalog=\"\", supersede=False):\n\n filelist=[os.path.join(path_dir,i) for i in inlist]\n n_split=len(outlist)\n\n pdfs=pd.read_csv(filelist[0],usecols=gcols)\n pdfs.drop_duplicates(inplace=True)\n\n print(\"csvs_scattered_to_grouped: Collecting items for group.\\n\")\n for i in range(1,len(filelist)):\n pdfs=pdfs.append(pd.read_csv(filelist[i],usecols=gcols),ignore_index=True)\n pdfs.drop_duplicates(inplace=True)\n\n if sort==1:\n pdfs.sort_values(gcols,inplace=True, ascending=True)\n elif sort==-1:\n pdfs.sort_values(gcols,inplace=True, ascending=False)\n\n aa_ed=np.array_split(pdfs, n_split)\n\n if supersede:\n for i in outlist:\n if os.path.isfile(os.path.join(path_dir,i)):\n os.remove(os.path.join(path_dir,i))\n if os.path.isfile(os.path.join(path_dir,str(catalog))):\n os.remove(os.path.join(path_dir,str(catalog)))\n\n print(\"csvs_scattered_to_grouped: Start processing files:\\n\")\n for i in range(0,len(filelist)):\n fi=pd.read_csv(filelist[i],usecols=scols)\n for j,ja in enumerate(aa_ed):\n wrtj=pd.merge(ja, fi, how='inner', on=gcols)\n append_to_csv(wrtj, os.path.join(path_dir,outlist[j]))\n print('csvs_scattered_to_grouped: '+str(i)+' file(s) finished.')\n\n if catalog:\n for i, d in enumerate(aa_ed):\n d['_@_FILE_']=outlist[i]\n append_to_csv(d, os.path.join(path_dir,str(catalog)))\n print('csvs_scattered_to_grouped: Catalog file created.')", "def export_csv_search(cart, tag = None):\n # Reads all the tweets in folder\n try:\n tweets = tbf.load_stream(cart, tag = tag)\n\n if tag is None:\n ii = cart.index('/#')\n tag = cart[ii+1:-1]\n\n nodes = np.unique(np.array([twe.user_name for twe in tweets]))\n #links_A = [lin.name_A for lin in twe.link_to]\n\n links_A = []\n links_B = []\n for twe in tweets:\n links_A += [lin.name_A for lin in twe.link_to]\n links_B += [lin.name_B for lin in twe.link_to]\n\n #tbf.export_csv(links_A, links_B)\n fileo = open(cart + tag + '_links.csv', 'w')\n filecsv = csv.writer(fileo,delimiter='\\t')\n\n for A, B in zip(links_A, links_B):\n filecsv.writerow([A,B])\n\n fileo.close()\n status = True\n cazzillo = None\n\n except Exception as cazzillo:\n print(cazzillo)\n status = False\n\n return status, cazzillo", "def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as 
csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def merge_csvs(metrics_csv_dims,metrics_csv_int,region,wk_dir):\n dict_dims = load_and_transpose_csv(metrics_csv_dims)\n dict_int = load_and_transpose_csv(metrics_csv_int)\n dict_dims.update(dict_int.copy())\n fname = os.path.join(wk_dir,region.replace(' ','_') + '_summary.csv')\n fields = [*dict_dims]\n models = ['Metric']+[*dict_dims[fields[0]]]\n\n with open(fname,'w') as f:\n writer = csv.DictWriter(f,fieldnames=models)\n writer.writeheader()\n for field in fields:\n row = {'Metric': field}\n row.update(dict_dims.get(field,{}))\n writer.writerow(row)\n\n data_desc = {\n os.path.basename(fname): {\n 'longname': os.path.basename(fname).split('.')[1].replace('_',' '),\n 'description': 'Parameters and metrics for ' + region + ' region'}}\n\n # add metadata to output.json\n asop.update_output_json('metrics', data_desc, wk_dir)", "def mergeAllSortedFiles():\n entries = os.listdir('output/Temp/input')\n for entry in entries:\n arr = []\n with open(\"output/Temp/input/\" + entry) as file:\n for line in file:\n line = int(line.strip())\n arr.append(line)\n mergeSortedToFile(arr)", "def process(self, terms):\n for entry in self.files:\n try:\n logger.info('file - {0}'.format(entry.path))\n\n # notional output file path\n path_sentences = self.path.joinpath('{0}.csv'.format(entry.path.stem))\n path_summary = self.path.joinpath('{0}-summary.csv'.format(entry.path.stem))\n logger.info('will save to - {0}'.format(path_sentences.resolve()))\n\n reports = self.inspect_doc(entry, terms)\n\n # receiving a list of dicts\n # therefore pandas can package into a useful outcome\n if len(reports) > 0:\n frame_sentences = pd.DataFrame(reports)\n\n frame_sentences = frame_sentences[['page', 'term', 'sentence']]\n logger.info('saving sentence file to - {0}'.format(path_sentences.resolve()))\n frame_sentences.to_csv(str(path_sentences.resolve()))\n \n frame_summary = frame_sentences.pivot_table(\n index='page',\n columns='term',\n aggfunc='size',\n fill_value=0\n )\n logger.info('saving summary file to - {0}'.format(path_sentences.resolve()))\n frame_summary.to_csv(str(path_summary.resolve()))\n\n\n except Exception as e:\n logger.error(e)", "def get_dataframe_from_merged_csv_files(tables_metadata, debug=False):\n combined_table = None\n for table_metadata in tables_metadata:\n if combined_table is None:\n combined_table = get_normalized_data_table(table_metadata)\n continue\n next_data_table = get_normalized_data_table(table_metadata)\n combined_table = combined_table.join(next_data_table)\n print_data_table_length('combined_table', combined_table.data, debug=debug)\n drop_headers('final_csv', combined_table.data)\n rename_headers('final_csv', combined_table.data)\n return combined_table.data", "def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2", "def read_costar_multiple(list_files, **kwargs):\n list_data = []\n for file in list_files:\n list_data.append(read_costar(file, **kwargs))\n data = pd.concat(list_data,\n axis=0, join='outer', ignore_index=True)\n data = data.drop_duplicates(subset=['address', 'city', 'zip'])\n return data", "def group_tweets(target_dir:str, source_file_name:str, grouped_file_name:str) -> None:\n\n df = pd.read_csv(target_dir + source_file_name)\n df.dropna(inplace=True)\n df = df.groupby('Handle').agg(lambda x: \" \".join(list(set(x.tolist()))))\n df.to_csv(target_dir + grouped_file_name)", "def process(fileglob):\n\n filepaths = 
glob.glob(fileglob)\n\n for filepath in filepaths:\n datum_list = []\n aggregated_data = {'user_id': None, 'n': 0, 'sum': 0, 'min': 0, 'max': 0}\n\n for parsed_row in extract_csv_data(filepath):\n\n if aggregated_data['user_id'] is None:\n aggregated_data['user_id'] = parsed_row['user_id']\n\n if aggregated_data['user_id'] != parsed_row['user_id']:\n # We want earliest 'date' datum first.\n sorted_datum = sorted(datum_list, key=lambda k: k['date'])\n\n for datum in sorted_datum:\n aggregated_data = update_aggregated_data(aggregated_data, datum)\n\n aggregated_data = finalize_aggregated_data(aggregated_data)\n\n # Dump current stack of user info to output file.\n dump_aggregated_data(aggregated_data, output_filepath(filepath))\n\n # Re-initialize\n datum_list = []\n aggregated_data = {'user_id': parsed_row['user_id'], 'n': 0, 'sum': 0, 'min': 0, 'max': 0}\n\n \"\"\"\n We are still on same user_id so just append to datum_list.\n \"\"\"\n datum_list.append(parsed_row)\n\n\n \"\"\"\n At end of csv file, roll-up and dump last chunk of user_data.\n \"\"\"\n\n sorted_datum = sorted(datum_list, key=lambda k: k['date'])\n\n for datum in sorted_datum:\n aggregated_data = update_aggregated_data(aggregated_data, datum)\n\n aggregated_data = finalize_aggregated_data(aggregated_data)\n\n dump_aggregated_data(aggregated_data, output_filepath(filepath))", "def mergeFile():\n with open(\"output.txt\",'w') as o:\n o.write(data1)\n o.write(data2)\n o.write(data3)", "def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()", "def merge(): #Status: WIP\r\n pass", "def merge_two_keyword_chunks(data_first, data_second):\n common_keyword = data_first.columns.intersection(data_second.columns)[0]\n scaling_factor = np.nanmedian(\n data_first[common_keyword] / data_second[common_keyword])\n data_second = data_second.apply(lambda x: x * scaling_factor)\n data = pd.merge(data_first, data_second.drop(\n common_keyword, axis=1), left_index=True, right_index=True)\n return data", "def write_csv(self, key_list, word_list):\n # Write out data\n out_data = []\n # Match filtered indexes to words\n for i in key_list.index:\n subset = word_list[word_list['key'] == i]\n # Add to aggregate list\n out_data.append(subset['word'].tolist())\n # Dump list to headerless CSV\n with open(self.output, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(out_data)\n return len(out_data)", "def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with 
logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))", "def load_data(messages_filepath, categories_filepath):\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n \n return messages.merge(categories, on='id')", "def combine(args, library_sizes):\n with open(args.counts, \"r\") as counts, open(args.results, \"r\") as results:\n with open(args.output_dir + \"counts_results.txt\", \"w+\") as file1, \\\n open(args.output_dir + \"counts_results_rpm.txt\",\"w+\") \\\n as file2, \\\n open(args.output_dir + \"counts_results_rpkm.txt\", \"w+\") \\\n as file3:\n head = True\n for count_line, results_line in zip(counts, results):\n count_line = count_line.strip()\n results_line = results_line.strip()\n\n if head: # Process column names into one header\n head = False\n count_head_parts = count_line.split(\"\\t\")\n results_head_parts = results_line.split(\"\\t\")\n results_head_parts = [\"Chromosome\", \"Start\", \"End\"] + \\\n results_head_parts[1:]\n\n new_head_parts = results_head_parts + \\\n count_head_parts[2:]\n new_head = \"\\t\".join(new_head_parts)\n new_head += \"\\n\"\n file1.write(new_head)\n file2.write(new_head)\n file3.write(new_head)\n\n else:\n process(count_line, results_line,\n file1, file2, file3, library_sizes)", "def combine_files(output_filename, *passes):\n all_columns = {}\n for x in passes:\n sp = pyvyu.load_opf(x)\n column_list = sp.get_column_list()\n for c in column_list:\n all_columns[c] = sp.get_column(c)\n sp = pyvyu.Spreadsheet()\n sp.name = output_filename\n sp.columns = all_columns\n pyvyu.save_opf(sp, output_filename, True, *all_columns.keys())\n return output_filename", "def batch(infolder, outfile): # type: (str, str) -> None\n\n if not os.path.isdir(infolder):\n return\n\n results = []\n\n for filename in os.listdir(infolder):\n print('Processing ' + filename)\n curresults = []\n if filename.endswith('.txt'):\n with open(os.path.join(infolder, filename), 'r') as curfile:\n curdata = curfile.read() + '\\n'\n curresults = processClauseText(curdata, 'text')\n elif filename.endswith('.pdf'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'pdf')\n elif filename.endswith('.docx'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'word')\n if len(curresults) > 0:\n for result in curresults:\n result['filename'] = filename\n results.extend(curresults)\n\n if outfile is not None:\n with open(outfile, 'w') as outfile:\n json.dump(results, outfile, indent=2)", "def main(directory, csv_file, task_name):\n csv_data = pd.read_csv(csv_file)\n colnames = csv_data.columns.tolist()\n\n edat_files = glob.glob(directory + \"*.edat*\")\n text_files = glob.glob(directory + \"*-*.txt\")\n 
all_files = edat_files + text_files\n pairs = []\n paired_texts = []\n\n for text_file in text_files:\n [text_fname, _] = os.path.splitext(text_file)\n for edat_file in edat_files:\n [edat_fname, _] = os.path.splitext(edat_file)\n if text_fname == edat_fname:\n pairs.append([text_file, edat_file])\n\n for pair in pairs:\n paired_texts.append(pair[0])\n\n unpaired_texts = list(set(text_files) - set(paired_texts))\n three_files = []\n pop_idx = []\n\n # List of lists\n for i_file in range(len(unpaired_texts)):\n for j_pair in range(len(paired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in paired_texts[j_pair]):\n three_files.append([paired_texts[j_pair], pairs[j_pair][1],\n unpaired_texts[i_file]])\n pop_idx.append(i_file)\n\n for rm in reversed(pop_idx):\n unpaired_texts.pop(rm)\n\n # three_files is the text files and edats that form a triad (one edat, two\n # similarly named text files).\n for triad in three_files:\n for i_pair in reversed(range(len(pairs))):\n if triad[0:2] == pairs[i_pair]:\n pairs.pop(i_pair)\n\n two_texts = []\n all_two_texts = []\n two_text_pairs = []\n\n for i_file in range(len(unpaired_texts)):\n for j_file in range(i_file + 1, len(unpaired_texts)):\n if (unpaired_texts[i_file][:len(unpaired_texts[i_file])-6] in unpaired_texts[j_file]):\n all_two_texts.append(i_file)\n all_two_texts.append(j_file)\n two_text_pairs.append([i_file, j_file])\n\n all_two_texts = sorted(all_two_texts, reverse=True)\n\n # two_texts is the text files that pair with other text files.\n for i_pair in range(len(two_text_pairs)):\n two_texts.append([unpaired_texts[two_text_pairs[i_pair][0]],\n unpaired_texts[two_text_pairs[i_pair][1]]])\n\n for i_file in all_two_texts:\n unpaired_texts.pop(i_file)\n\n # one_text is the remaining un-paired text files.\n one_text = [[unpaired_texts[i_file]] for i_file in range(len(unpaired_texts))]\n\n # Determine subject IDs and timepoints for all files.\n # Assumes that files will be named according to convention\n # blahblahblah_[subj]-[tp].txt or blahblahblah-[subj]-[tp].txt.\n one_text_subjects = [get_subject(file_[0]) for file_ in one_text]\n one_text_timepoints = [get_timepoint(file_[0]) for file_ in one_text]\n two_text_subjects = [get_subject(pair[0]) for pair in two_texts]\n two_text_timepoints = [get_timepoint(pair[0]) for pair in two_texts]\n three_file_subjects = [get_subject(triad[0]) for triad in three_files]\n three_file_timepoints = [get_timepoint(triad[0]) for triad in three_files]\n pair_subjects = [get_subject(pair[0]) for pair in pairs]\n pair_timepoints = [get_timepoint(pair[0]) for pair in pairs]\n\n af_files = ([item for sublist in pairs for item in sublist] +\n [item for sublist in two_texts for item in sublist] +\n [item for sublist in three_files for item in sublist] +\n [item for sublist in one_text for item in sublist])\n\n one_edat = list(set(all_files) - set(af_files))\n one_edat = [[edat] for edat in one_edat]\n one_edat_subjects = [get_subject(file_[0]) for file_ in one_edat]\n one_edat_timepoints = [get_timepoint(file_[0]) for file_ in one_edat]\n\n all_subjects = (one_text_subjects + two_text_subjects + three_file_subjects +\n pair_subjects + one_edat_subjects)\n all_notetype = (([\"one_text\"] * len(one_text_subjects)) +\n ([\"two_texts\"] * len(two_text_subjects)) +\n ([\"three_files\"] * len(three_file_subjects)) +\n ([\"pair\"] * len(pair_subjects)) +\n ([\"one_edat\"] * len(one_edat_subjects)))\n all_timepoints = (one_text_timepoints + two_text_timepoints +\n three_file_timepoints + 
pair_timepoints +\n one_edat_timepoints)\n all_file_sets = one_text + two_texts + three_files + pairs + one_edat\n\n organized_dir = org_dir_dict.get(task_name)\n\n for i_subj in range(len(all_subjects)):\n month = timepoint_dict.get(task_name).get(all_timepoints[i_subj])\n files_note = note_dict.get(all_notetype[i_subj])\n if len(all_subjects) > 4:\n try:\n print(\"Successfully organized %s-%s\" % (all_subjects[i_subj], month))\n print(\"Moved:\")\n subject_id = all_subjects[i_subj]\n files = all_file_sets[i_subj]\n note = organize_files(subject_id, month, files, organized_dir)\n note.append(files_note)\n orged = 1\n orgedwhen = time.strftime(\"%Y/%m/%d\")\n orgedby = \"PY\"\n except IOError:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n\n try:\n if all_notetype[i_subj] == \"pair\":\n print(\"Successfully converted %s-%s\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 1\n convedwhen = time.strftime(\"%Y/%m/%d\")\n convedby = \"PY\"\n else:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n except IOError:\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n else:\n print(\"%s-%s couldn't be organized.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n note = files_note\n orged = 0\n orgedwhen = \"\"\n orgedby = \"\"\n print(\"%s-%s couldn't be converted.\" % (all_subjects[i_subj], all_timepoints[i_subj]))\n conved = 0\n convedwhen = \"\"\n convedby = \"\"\n\n csv_data = add_subject(csv_data, all_subjects[i_subj],\n all_timepoints[i_subj], orged, orgedwhen, orgedby,\n conved, convedwhen, convedby, note)\n\n csv_data = csv_data[colnames]\n csv_data.to_csv(csv_file, index=False)", "def _add_parsed_files(self, files):\n path = os.path.join(self.path, 'roas_parsed.csv')\n with open(path, 'w+') as f:\n for line in files:\n f.write(line + '\\n')\n utils.csv_to_db(Historical_ROAs_Parsed_Table, path)", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def joinRows(r1,r2,outfname):\n outf = open(outfname,'w')\n f1 = file(r1,'r')\n f2 = file(r2,'r')\n for row1 in f1:\n if row1.strip() > '':\n row2 = f2.next()\n outf.write('%s%s\\n' % (row1.strip(),row2.strip()))\n outf.close()", "def main():\n with open('csv_files/products.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" description{}\".format(str(i)),\n \" type{}\".format(str(i)),\n \" {}\".format(str(random.randint(1, 100)))])\n\n with open('csv_files/customers.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" first_name{}\".format(str(i)),\n \" last_name{}\".format(str(i)),\n \" address{}\".format(str(i)),\n \" phone_number{}\".format(str(i)),\n \" email{}\".format(str(i))])", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n 
self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def end_result_csv(final_list, mes):\n\n def filna_dict(mes):\n \"\"\"werkt. 
maar ga een dict comprehension proberen.\"\"\"\n key = [f'pdf_{count + 1}' for count in range(mes)]\n value = ['stans.pdf' for count in range(mes)]\n filna_tobe_inserted = dict(zip(key, value))\n return filna_tobe_inserted\n\n fin = filna_dict(mes)\n\n for pad in final_list:\n\n df = pd.read_csv(pad, delimiter=\";\")\n df.fillna(fin, inplace=True)\n df.to_csv(pad, index=0)\n print(pad)\n\n return fin", "def load_data(messages_filepath, categories_filepath):\n # Load the data files\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.merge(categories, how = 'outer' , on =['id'])\n return df", "def merge_data(self, merge_source, keys, tags, replace=False, overwrite=False, queries=[]):\n import hxl.filters\n return hxl.filters.MergeDataFilter(self, merge_source, keys, tags, replace, overwrite, queries=queries)", "def merge(csv1_path, csv2_path, csv_out_path):\n df1 = prepare_df(csv1_path, var_name='variable1')\n df2 = prepare_df(csv2_path, var_name='variable2')\n\n if df1.shape[0] != df2.shape[0]:\n raise MergerError(\"Dataframes with different number of rows\")\n\n df_merge = pd.merge(df1[[ROW_ID, STORE_ID, ADDRESS, ADDRESS_NORM, VALUE]],\n df2[[ROW_ID, ADDRESS, ADDRESS_NORM, VALUE]],\n on=ADDRESS_NORM)\n df_merge[RATIO] = df_merge.apply(lambda row: DECIMALS % (row.value_x / row.value_y) if row.value_y > 0 else -1,\n axis=1)\n df_merge = df_merge[[STORE_ID, VALUE + '_x', VALUE + '_y', RATIO]]\n df_merge.columns = [STORE_ID, VAR1, VAR2, RATIO]\n\n df_merge.to_csv(csv_out_path, sep=\";\", index=False)", "def bed_merge(output_file, *inputfiles):\n working_dir = os.path.dirname(inputfiles[0]);\n temp_file1 = working_dir + os.sep + \"temp_dfj304jfd.txt\";\n\n #Concatenate input files\n cat_command = ['cat'];\n cat_command.extend(inputfiles);\n with open(temp_file1, 'w') as fout:\n sp.check_call(cat_command, stdout=fout);\n\n #Sort file to be merged\n temp_file2 = working_dir + os.sep + \"temp_fje094j3.txt\";\n with open(temp_file2, 'w') as fout:\n sp.check_call(['sortBed','-i',temp_file1], stdout=fout);\n\n #Merge file\n if(output_file.find(os.sep) == -1):\n output_file = working_dir + os.sep + output_file;\n\n with open(output_file, 'w') as fout:\n sp.check_call(['bedtools','merge','-i',temp_file2], stdout=fout);\n\n #Clean up temporary files\n os.remove(temp_file1);\n os.remove(temp_file2);\n\n return output_file;", "def merge_docs(self):", "def tag_file_process(self, multiple_files):\n # the path is now becoming a string since it goes through the UI\n # text entry box, not a list or tuple any more, so we turn it to a\n # list of paths\n file_list = multiple_files.split(' ')\n # the main dictionary to store all tags\n tag_dict = dict()\n rows = []\n # now for all the tag file under the folder(root directory), we load\n # the data into the dictionary\n if len(file_list) == 0:\n tk.messagebox.showwarning('warning', 'no files chosen')\n else:\n for file_path in file_list:\n if os.path.isfile(file_path):\n with open(file_path, 'r', encoding='utf-8') as \\\n current_tag_file:\n # initialize the dictionary and the inner dictionary\n reader = csv.reader(current_tag_file)\n for row in reader:\n # the encode, decode is use to resolve the \"\\ueffa\"\n # BOM-utf8 problem\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n tag_dict[row[0]] = dict()\n rows.append(row)\n # store the tag into the dictionary\n for row in rows:\n # the 1st column is the main key(mob fact col name)\n # the 2nd column is the tag id\n # the 3rd column is the tag with real 
meaning\n tag_dict[row[0]][row[1]] = row[2]\n\n else:\n tk.messagebox.showinfo('warning', 'can not obtain: ' +\n file_path)\n return tag_dict", "def merge(mergeFiles,mergeDb,createDB,dbase,v,dfile):\n\tglobal verbose\n\n\tverbose = v\n\tif len(mergeFiles) > 0:\n\t\tfor f in mergeFiles:\n\t\t\tprint \"Merge => \"+ f\n\t\t\ttry:\n\t\t\t\tfl = open(f,'r')\n\t\t\t\tProcessEntryFile(fl)\n\t\t\t\tfl.close()\n\t\t\t\tif verbose >= 1:\n\t\t\t\t\tprint reference\n\t\t\texcept IOError:\n\t\t\t\tprint 'File '+f +' cannot be open'\n\n\tif len(mergeDb) > 0:\n\t\tfor f in mergeDb:\n\t\t\tprint \"Merge => \"+ f\n\t\t\tProcessEntryBase(f)\n\t\t\tif verbose >= 1:\n\t\t\t\tprint reference\n\t\n\tif dfile != '':\n\t\ttry:\n\t\t\tif os.path.exists(dfile):\n\t\t\t\tos.remove(dfile)\n\t\t\tfref = open(dfile,'w')\n\t\t\tput_in_file('',fref,reference)\n\t\t\tfref.close()\n\t\t\tif os.path.exists(afile):\n\t\t\t\tos.remove(afile)\n\t\t\tfref = open(afile,'w')\n\t\t\tput_in_afile('',fref,reference)\n\t\t\tfref.close()\n\t\texcept IOError:\n\t\t\tprint 'Cannot open '+dfile+' file'\n\n\tif dbase != '':\n\t\tput_in_db(dbase,reference,createDB)", "def merge_on_fip(df, directory):\n pol_fip_data = pd.read_csv(directory, index_col=0)\n df = df.merge(pol_fip_data, left_on='GESTFIPS', right_on='fips' ,how='left')\n df.drop(columns=['fips'], inplace=True)\n \n return df", "def merge():\n fw = codecs.open(MATCHED_TEMPLATE_LABEL_ALL, 'w', 'utf-8')\n for en, zh in sorted(all_matched.iteritems()):\n fw.write(en+'\\t\\t'+zh+'\\n')\n fw.close()", "def import_text():\n known = collections.OrderedDict()\n match = collections.OrderedDict()\n\n f = open(DIR_LOCATION, 'rU')\n reader = csv.DictReader(f)\n\n #read in csv, assumes there is a col = known and one = match\n for row in reader:\n if row['known'] != '':\n known[row['known']] = row['known_prop']\n\n if row['match'] != '':\n match[row['match']] = row['match_prop']\n\n return known, match", "def load_data(data_links_list=(\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/raw_data.csv',\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/sample_meta_info.tsv')):\n\n # Reading data sets from the links provided.\n df1 = pd.read_csv(data_links_list[0],\n error_bad_lines=False)\n df2 = pd.read_csv(data_links_list[1],\n sep='\\t')\n df2 = df2.set_index(df2['project'])\n # fill the Nas id df1 as \". 
Makes the groupbys behave better.\n df1.fillna('', inplace=True)\n # repleace 'genus' = 'other' with an empty string to be consistent.\n df1.replace(to_replace='other', value='', inplace=True)\n # Removing duplicate columns.\n del df2['project']\n del df2['ID']\n df1 = df1.set_index(df1['project'])\n # Removing duplicate column.\n del df1['project']\n # Joining the two datasets.\n df = df1.join(df2)\n # Uniformity in non-capitalization of column names.\n df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',\n 'Class': 'class', 'Order': 'order',\n 'Family': 'family', 'Genus': 'genus',\n 'Length': 'length'}, inplace=True)\n df.index.names = ['sampleID']\n # Rearranging columns so that abundance is the last column.\n df = df[['kingdom',\t'phylum', 'class', 'order',\n 'family', 'genus', 'length', 'oxygen',\n 'replicate', 'week', 'abundance']]\n assert isinstance(df, pd.DataFrame)\n return df", "def groupDataByModel(self, input_dirs):\n t1 = time.time()\n print(\"merging files by model to %s\" % MERGE_DIR)\n ensure_dir(MERGE_DIR)\n fileByModel = dict()\n randomByModel = dict()\n totalLineMerged = 0\n for inputDir in input_dirs:\n for file in walktree(inputDir):\n for line in openFile(file):\n fields = line.split(self.delimiter)\n if self.ignore_invalid:\n if len(fields) != 4 or fields[0] == '' or fields[\n 1] == '' or fields[2] == '' or fields[3] == '':\n print('Ingonre Invalid line', fields)\n continue\n model = fields[0]\n if model not in fileByModel:\n fileByModel[model] = open('%s/%s.txt' % (MERGE_DIR, model), 'w')\n randomByModel[model] = random.Random()\n\n if self.sample >= 1.0 or randomByModel[model].random() < self.sample:\n fileByModel[model].write(line)\n totalLineMerged += 1\n for file in list(fileByModel.values()):\n file.close()\n t2 = time.time()\n print('Total line proccessed {}'.format(totalLineMerged))\n print(\"merging files take %ss\" % (t2 - t1))\n\n if self.use_mask:\n fileByModel = self.removeMaskData(fileByModel)\n return fileByModel", "def mergeThreads ():\n \n print \"merge threads starts\"\n data = np.zeros((1, 6))\n for threadID in xrange(0, 4):\n filename = \"TOF_nt_TOF_t%d.csv\"%(threadID)\n dataThread = np.loadtxt(filename, delimiter = \",\", skiprows = 9)\n data = np.vstack((data, dataThread))\n savefmt = [\"%.5g\", \"%.5g\", \"%.5g\", \"%d\", \"%d\", \"%d\"]\n np.savetxt(\"TOFfile.dat\", data[1:, :], fmt = savefmt)\n print \"merge threads finished\"" ]
[ "0.6509552", "0.6354343", "0.6310943", "0.6292182", "0.6235866", "0.6196554", "0.6100369", "0.6056441", "0.59984165", "0.59235424", "0.5832123", "0.5764415", "0.5726251", "0.5702031", "0.56843436", "0.56209385", "0.56083906", "0.5602722", "0.560202", "0.5594253", "0.5588563", "0.5587008", "0.5567416", "0.5506216", "0.5498276", "0.54889256", "0.5473502", "0.54641944", "0.5438319", "0.54148406", "0.5396474", "0.5391705", "0.5348734", "0.53394717", "0.53197956", "0.5295065", "0.5284222", "0.5276898", "0.5272107", "0.52712363", "0.52440417", "0.52264494", "0.52230424", "0.52087027", "0.51999587", "0.5187576", "0.51852554", "0.51789045", "0.5171793", "0.5130347", "0.5119347", "0.50902575", "0.50826275", "0.5075883", "0.5058818", "0.50584865", "0.5055669", "0.5048017", "0.50461894", "0.5045485", "0.5045017", "0.503984", "0.50257385", "0.49939716", "0.49917015", "0.4982141", "0.49810892", "0.4975568", "0.49635208", "0.49629542", "0.49440166", "0.49409348", "0.49397758", "0.49395826", "0.4935081", "0.49316162", "0.4925729", "0.4925257", "0.49204326", "0.4910426", "0.49074543", "0.49063677", "0.49027148", "0.48987338", "0.4896023", "0.489398", "0.48919374", "0.48918426", "0.48900726", "0.4878589", "0.4878403", "0.48685232", "0.48546407", "0.4850015", "0.48482668", "0.48457402", "0.48424423", "0.48411554", "0.48370546", "0.48346275" ]
0.604583
8
Steps to fetch data from a repo. The returned data is passed to self._present
def _fetch_data(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def fetch_data(self) -> GitHubRepositoryModel:\n result = await self._client.repos.get(self.repository)\n return result.data", "def fetch_repo(data):\n repo = Repository.objects.get(**data)\n\n # create a temporary directory\n tmp_dir = util.tmp_dir('github')\n\n # log\n log.info(\"Fetching repo %s to %s\", repo.full_name, tmp_dir)\n\n # clone the repository to the directory\n git.Repo.clone_from(repo.git_url, tmp_dir)\n\n # add the repo path to the database\n repo.local_path = tmp_dir\n repo.save()\n\n # tell workers the repo is available\n publish('github.repo_available', data)", "def do_fetch(self):\n pass", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "def fetch(self, remote, *args):\n return self.cmd('fetch', remote, *args)", "def fetch(self):\n\n url = 'https://github.com/users/' + self.username + '/contributions'\n page = urllib.request.urlopen(url)\n graph_data = page.read()\n\n self._graph_data_valid(graph_data)\n self._parse_graph_data(graph_data)", "def __gitFetch(self):\n self.vcs.gitFetch(self.project.getProjectPath())", "def fetch(self, data: Any, *args: Any, **kwargs: Any):\n product = None\n next_args = (data, *args)\n next_kwargs = kwargs\n for name, method, outlet, description in self.steps:\n product, new_args, next_kwargs = method(*next_args, **next_kwargs)\n next_args = (product, *new_args)\n if isinstance(product, self.outlet):\n return product\n else:\n raise RuntimeError(\"Process was not completed according to specification.\")", "def fetch(self):\n pass", "def fetch(self):\n pass", "def fetch(self) -> None:\n pass", "def read(self, *args, **kwargs):\r\n\r\n if not self.current_repo:\r\n # get the first available repository\r\n self.current_repo = next(self.repo_list)\r\n\r\n if self.current_repo in self.repo_done:\r\n try:\r\n # get the next available repository\r\n self.current_repo = next(self.repo_list)\r\n # call self to get the next iteration \r\n self.read() \r\n except StopIteration:\r\n raise(\"repository exhausted\")\r\n\r\n else:\r\n # iterate to get all data until (GITHUB_SUPPORTED_RESOURCES is exhausted)\r\n resource = self.build_resource(page=self.page, per_page=self.per_page)\r\n if resource: \r\n if self.current_result.get(self.current_repo, None):\r\n self.current_result['owner'] = self.owner\r\n self.current_result['repo'] = self.current_repo\r\n self.current_result['resource'] = resource \r\n else: \r\n self.current_result['resource'] = resource \r\n self.current_result['page'] = self.page\r\n self.current_result['per_page'] = self.per_page \r\n \r\n # increment pagination\r\n self.page += settings.DEFAULT_PAGE\r\n self.per_page += settings.DEFAULT_PER_PAGE\r\n else:\r\n self.repo_done.append(self.current_repo)\r\n # reset pagination\r\n self.page = settings.DEFAULT_PAGE\r\n self.per_page = settings.DEFAULT_PER_PAGE\r\n \r\n return self.current_result", "def fetch_data(self):", "def fetch(self) -> None:\n workflow_spec_path = os.path.join(self._output_dir, self._spec)\n self._download_file(self._parsed_url.original_url, workflow_spec_path)", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n 
self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "async def fetch_data(self) -> GitHubCommitModel | None:\n result = await self._client.repos.list_commits(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n return result.data[0] if result.data else None", "def fetch(self):\n raise NotImplementedError()", "def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url", "def fetch(self, path, **kwargs):\n self.http_client.fetch(self.get_url(path), self.stop, **kwargs)\n return self.wait()", "def _fetch(self, fetch):\n if fetch == 'posts':\n if self['handle'] and not self['guid']: self.fetchhandle()\n else: self.fetchguid()\n elif fetch == 'data' and self['handle']:\n self.fetchprofile()", "async def _fetch_data(self) -> T:\n raise NotImplementedError", "def run(self):\n results = self.fetch()\n return results", "def fetch_data():\n data.fetch_data()\n data.start_updating()", "def fetch(args):\n do_all_projects_remotes(args + [\"fetch\"])", "async def fetch_get(self, command, data):\n _LOGGER.debug(\"[Foobar2k] Running fetch GET\")\n async with self._session.get(\"{base_url}{command}\".format(\n base_url=self._base_url, command=command), data=data) as resp_obj:\n response = await resp_obj.text()\n if (resp_obj.status == 200 or resp_obj.status == 204):\n _LOGGER.debug(\"[Foobar2k] Have a response\")\n return response\n else:\n _LOGGER.error(f\"Host [{self._host}] returned HTTP status code [{resp_obj.status}] to GET command at \"\n \"end point [{command}]\")\n return None", "async def fetch_data(self) -> T:", "def fetch():\n project = get_project(require=True)\n resp = request('post', '/api/v0/projects/{id}/fetch/'.format(id=project.id))\n data = resp.json()\n commits = data.get('commits', ())\n if commits:\n for commit in commits:\n success('Fetched: {ref} ({identifier})'.format(ref=commit['ref'], identifier=commit['identifier']))\n success('{n} new commits were fetched!'.format(n=len(commits)))\n else:\n info('No new commits.')\n errors = data.get('errors', ())\n for error in errors:\n warning(error)", "def fetch_data(self):\n if not self.json_query:\n self.generate_json_query()\n\n response = search_graphql(self.json_query)\n\n if \"errors\" in response:\n print(\"ERROR encountered in fetch_data().\")\n for error in response['errors']:\n print(error['message'])\n\n return\n\n self.response = response\n\n if len(self.response['data'][self.data_type.value]) != len(self.id):\n print(\"WARNING: one or more IDs not found in the PDB.\")", "def fetch(self):\r\n if not self._fetched:\r\n self._fetched = True\r\n self.data = query_cache.get(self.iden) or []", "def __hgFetch(self):\n shouldReopen = self.vcs.getExtensionObject(\"fetch\").hgFetch(\n self.project.getProjectPath())\n if shouldReopen:\n res = E5MessageBox.yesNo(\n None,\n self.tr(\"Fetch\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def fetch(self):\n if self.host.filesystem.exists(self.path):\n _log.info('WPT checkout exists at %s, fetching latest', self.path)\n self.run(['git', 'fetch', 'origin'])\n self.run(['git', 'reset', '--hard', 'origin/master'])\n return\n\n _log.info('Cloning GitHub web-platform-tests/wpt into %s', self.path)\n if self.gh_token:\n remote_url = WPT_GH_SSH_URL_TEMPLATE.format(self.gh_token)\n else:\n remote_url = WPT_MIRROR_URL\n _log.info('No credentials given, using wpt mirror URL.')\n _log.info(\n 'It is possible for the mirror to be delayed; see https://crbug.com/698272.'\n )\n # Do not use self.run here because self.path doesn't exist yet.\n self.host.executive.run_command(\n ['git', 'clone', remote_url, self.path])\n\n _log.info('Setting git user name & email in %s', self.path)\n self.run(['git', 'config', 'user.name', DEFAULT_WPT_COMMITTER_NAME])\n self.run(['git', 'config', 'user.email', DEFAULT_WPT_COMMITTER_EMAIL])", "async def fetch_data(self) -> GitHubReleaseModel | None:\n result = await self._client.repos.releases.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n if not result.data:\n return None\n\n for release in result.data:\n if not release.prerelease:\n return release\n\n # Fall back to the latest release if no non-prerelease release is found\n return result.data[0]", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def fetch_mirror(self, repo, body):\n url = self._repo_url(repo, other='/mirror')\n response = self.rest.get(url)\n\n if response.status_code is not 200:\n self.module.fail_json(msg=response.info)\n return response.info", "async def fetch_data(self):\n url = URL_HASSIO_VERSION.format(self.upstream)\n try:\n _LOGGER.info(\"Fetch update data from %s\", url)\n with async_timeout.timeout(10, loop=self.loop):\n async with self.websession.get(url) as request:\n data = await request.json(content_type=None)\n\n except (aiohttp.ClientError, asyncio.TimeoutError, KeyError) as err:\n _LOGGER.warning(\"Can't fetch versions from %s -> %s\", url, err)\n return\n\n except json.JSONDecodeError as err:\n _LOGGER.warning(\"Can't parse versions from %s -> %s\", url, err)\n return\n\n # data valid?\n if not data:\n _LOGGER.warning(\"Invalid data from %s\", url)\n return\n\n # update versions\n self._data[ATTR_HOMEASSISTANT] = data.get('homeassistant')\n self._data[ATTR_HASSIO] = data.get('hassio')\n self.save()", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from 
{self.edition}')", "async def fetch_data(self) -> IssuesPulls:\n base_issue_response = await self._client.repos.issues.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n pull_response = await self._client.repos.pulls.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n\n pulls_count = pull_response.last_page_number or 0\n issues_count = (base_issue_response.last_page_number or 0) - pulls_count\n\n issue_last = base_issue_response.data[0] if issues_count != 0 else None\n\n if issue_last is not None and issue_last.pull_request:\n issue_response = await self._client.repos.issues.list(self.repository)\n for issue in issue_response.data:\n if not issue.pull_request:\n issue_last = issue\n break\n\n return IssuesPulls(\n issues_count=issues_count,\n issue_last=issue_last,\n pulls_count=pulls_count,\n pull_last=pull_response.data[0] if pulls_count != 0 else None,\n )", "def _fetch_milestone_info(self):\n milestones = \"{}/milestones/{}\".format(self._base_url, self._milestone)\n with urllib.request.urlopen(milestones) as h:\n self.milestone_info = json.load(h)", "def fetch(self, only_if_updated=True):\n raise NotImplementedError()", "def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())", "def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)", "def __gitBundleFetch(self):\n self.vcs.gitBundleFetch(self.project.getProjectPath())", "def get_repo_info(loader, sha, prov_g):\n user_repo = loader.getFullName()\n repo_title = loader.getRepoTitle()\n repo_desc = loader.getRepoDescription()\n contact_name = loader.getContactName()\n contact_url = loader.getContactUrl()\n commit_list = loader.getCommitList()\n licence_url = loader.getLicenceURL() # This will be None if there is no license\n\n # Add the API URI as a used entity by the activity\n if prov_g:\n prov_g.add_used_entity(loader.getRepoURI())\n\n prev_commit = None\n next_commit = None\n version = sha if sha else commit_list[0]\n if commit_list.index(version) < len(commit_list) - 1:\n prev_commit = commit_list[commit_list.index(version) + 1]\n if commit_list.index(version) > 0:\n next_commit = commit_list[commit_list.index(version) - 1]\n\n info = {\n 'version': version,\n 'title': repo_title,\n 'description': repo_desc,\n 'contact': {\n 'name': contact_name,\n 'url': contact_url\n } \n }\n if licence_url:\n info['license'] = {\n 'name': 'License',\n 'url': licence_url\n }\n\n if type(loader) is GithubLoader:\n basePath = '/api-git/' + user_repo + '/'\n basePath += ('subdir/' + loader.subdir + '/') if loader.subdir else ''\n basePath += ('commit/' + sha + '/') if sha else ''\n if type(loader) is GitlabLoader:\n basePath = '/api-gitlab/' + user_repo + '/query/' \n basePath += ('branch/' + loader.branch + '/') if loader.branch else ''\n basePath += ('subdir/' + loader.subdir.strip('/') + '/') if loader.subdir else ''\n basePath += ('commit/' + sha + '/') if sha else ''\n elif 
type(loader) is LocalLoader:\n basePath = '/api-local/'\n elif type(loader) is URLLoader:\n basePath = '/api-url/'\n else:\n # TODO: raise error\n glogger.error('Cannot set basePath, loader type unkown')\n\n return prev_commit, next_commit, info, basePath", "def fetch(self, vault_client):\n result = self.read(vault_client)\n if result:\n if isinstance(result, dict) and 'data' in result:\n self.existing = result['data']\n else:\n self.existing = result\n else:\n self.existing = None", "def fetch(self) -> Fetch:\n return self._fetch", "def _fetch(self, sha: str) -> None:\n # have multiple threads downloading in parallel\n queue = [sha]\n pending: Set[str] = set()\n downloaded: Set[str] = set()\n input_queue: \"Queue[Union[str, Poison]]\" = Queue() # requesting downloads\n output_queue: \"Queue[Union[str, Poison]]\" = Queue() # completed downloads\n procs = []\n for _ in range(self._processes):\n target = Binder(self, \"_download\")\n args = (input_queue, output_queue)\n # use multiprocessing.dummy to use threads instead of processes\n proc = multiprocessing.dummy.Process(target=target, args=args)\n proc.daemon = True\n proc.start()\n procs.append(proc)\n self._trace(\"\", level=Level.INFO, exact=True) # for showing progress\n done = total = 0\n while queue or pending:\n if queue:\n # if possible, queue up download\n sha = queue.pop()\n if sha in downloaded or sha in pending:\n continue\n if git.object_exists(sha):\n if sha == git.EMPTY_TREE_HASH:\n # git.object_exists() returns True for the empty\n # tree hash even if it's not present in the object\n # store. Everything will work fine in this situation,\n # but `git fsck` will complain if it's not present, so\n # we explicitly add it to avoid that.\n git.write_object(\"tree\", b\"\")\n if not git.history_exists(sha):\n # this can only happen in the case of aborted fetches\n # that are resumed later\n self._trace(\"missing part of history from %s\" % sha)\n queue.extend(git.referenced_objects(sha))\n else:\n self._trace(\"%s already downloaded\" % sha)\n else:\n pending.add(sha)\n input_queue.put(sha)\n else:\n # process completed download\n res = output_queue.get()\n if isinstance(res, Poison):\n # _download never puts Poison with an empty message in the output_queue\n assert res.message is not None\n self._fatal(res.message)\n pending.remove(res)\n downloaded.add(res)\n queue.extend(git.referenced_objects(res))\n # show progress\n done = len(downloaded)\n total = done + len(pending)\n pct = int(float(done) / total * 100)\n message = \"\\rReceiving objects: {:3.0f}% ({}/{})\".format(pct, done, total)\n self._trace(message, level=Level.INFO, exact=True)\n if total:\n self._trace(\n \"\\rReceiving objects: 100% ({}/{}), done.\\n\".format(done, total),\n level=Level.INFO,\n exact=True,\n )\n for proc in procs:\n input_queue.put(Poison())\n for proc in procs:\n proc.join()", "def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])", "def fetch(self):\n try:\n self.genre = 'Review'\n log.debug(self.log_msg(\"Fetching the prouct page url %s\"%self.currenturi))\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n self.parent_page_title = stripHtml(self.soup.find('h1',{'id':'pgTitleDetail'}).renderContents())\n except:\n self.parent_page_title =''\n try:\n 
self.__product_price = self.soup.find('tbody',{'class':'prices'}).td.renderContents().replace('$','')\n except:\n log.exception(\"Error in fetching product_price\")\n self.__product_price = None\n\n parent_page_url = self.task.instance_data['uri']\n review_first_page_url = self.soup.find('a',text=\"Show All Customer Reviews &#187; \").parent['href']\n review_url_order = \"&sortReviewsBy=DateDescending\"\n self.currenturi = self.base_url + review_first_page_url + review_url_order\n log.info(self.log_msg('current_uri :: %s'%(self.currenturi)))\n self._getParentPage()\n self.next_url_links=[]\n self.fetch_next_link = True\n while self.fetch_next_link:\n self._iterateReviewPages(parent_page_url)\n return True\n except Exception,e:\n log.exception(self.log_msg(\"Exception occured in fetch()\"))\n return False", "def fetchall(self):\n try:\n self.repo.remotes.origin.fetch()\n except git.exc.GitCommandError as err:\n raise GitError(err)", "def svn_fetch(self, *arguments, **kwargs):\n return self.get_returncode('svn', 'fetch', *arguments, **kwargs)", "async def prep_fetch(self, verb, command, data = None, retries = 5):\n _LOGGER.debug(\"[Foobar2k] Running prep_fetch\")\n try:\n if self._session and not self._session.closed:\n if verb == HTTP_GET:\n return await self.fetch_get(command, data)\n else:\n return await self.fetch_post(command, data)\n async with aiohttp.ClientSession() as self._session:\n if verb == HTTP_GET:\n return await self.fetch_get(command)\n else:\n return await self.fetch_post(command, data)\n except ValueError:\n pass\n except ServerDisconnectedError as error:\n _LOGGER.debug(f\"[Foobar2k] Disconnected Error. Retry Count [{retries}]\")\n if retries == 0:\n raise error\n return await self.prep_fetch(command, data, retries=retries - 1)", "def _load(self):\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n LOG.debug(\"Loading %s\" % self.branch_id)\n doc = self._client.getjson(path=\"/users/%(username)s/repos/%(reponame)s\"\n \"/branches/%(name)s\" % context)\n LOG.debug(\"doc loaded: %r\" % doc)\n slice_id = \"%(username)s/%(reponame)s/%(slice_id)s\" % {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"slice_id\": doc[\"slice_id\"]\n }\n self._slice = self._client.slice(slice_id)\n self._packages = doc[\"packages\"]", "def fetch(self, repopath=None):\n craftDebug.trace(\"HgSource.fetch called\")\n\n # get the path where the repositories should be stored to\n if repopath == None:\n repopath = self.repositoryUrl()\n\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n # repoString = utils.replaceVCSUrl( repopath )\n repopath = repopath.replace(\"[hg]\", \"\")\n repoUrl, repoBranch, _ = utils.splitVCSUrl(repopath)\n ret = True\n\n # only run if wanted (e.g. 
no --offline is given on the commandline) or no hg support is given by the python installation\n if (not self.noFetch and self.enableHg):\n # question whether mercurial stuff uses these proxies\n checkoutDir = self.checkoutDir()\n\n # check corrupted checkout dir\n if os.path.exists(checkoutDir) and not os.path.exists(checkoutDir + \"\\.hg\"):\n os.rmdir(checkoutDir)\n\n if not os.path.exists(checkoutDir):\n os.makedirs(checkoutDir)\n os.chdir(checkoutDir)\n ret = self.system(\"%s clone %s .\" % (self.hgExecutable, repoUrl)) # TODO: check return code for success\n\n if os.path.exists(checkoutDir):\n os.chdir(checkoutDir)\n ret = self.system(\n \"%s update %s\" % (self.hgExecutable, repoBranch)) # TODO: check return code for success\n else:\n craftDebug.log.debug(\"skipping hg fetch (--offline)\")\n return ret", "def fetch(self):\r\n if self.wp_op is None: # If we were already doing a list or save, just restart the fetch without changing the operation\r\n self.wp_op = \"fetch\"\r\n self.master.waypoint_request_list_send()", "def obtain(self, *args: Any, **kwargs: Any) -> None:\n self.ensure_dir()\n\n url = self.url\n\n self.log.info(\"Cloning.\")\n # todo: log_in_real_time\n self.cmd.clone(\n url=url,\n progress=True,\n depth=1 if self.git_shallow else None,\n config={\"http.sslVerify\": False} if self.tls_verify else None,\n log_in_real_time=True,\n )\n\n self.log.info(\"Initializing submodules.\")\n self.cmd.submodule.init(\n log_in_real_time=True,\n )\n self.cmd.submodule.update(\n init=True,\n recursive=True,\n log_in_real_time=True,\n )\n\n self.set_remotes(overwrite=True)", "def get_git_data():\n global event, commit_data, issue_data\n lock.acquire()\n commit_data = get_commits()\n issue_data = get_issues()\n lock.release()\n event.enter(3600, 1, get_git_data, ())", "def _fetch_journal_and_issue_data(self, **criteria):\n found_journal_issues = self._scieloapi.issues.filter(\n limit=1, **criteria)\n return self._scieloapi.fetch_relations(self._sapi_tools.get_one(found_journal_issues))", "def _fetch(cls, *args, **kwargs):\n apikey = htpc.settings.get('plexpy_apikey')\n\n if apikey is None:\n raise\n\n url = '%sapi/v2?apikey=%s&%s' % (cls._build_url(), apikey, urlencode(kwargs))\n\n try:\n r = requests.get(url, verify=False)\n r.raise_for_status()\n # Lets just copy the headers for now.\n cherrypy.response.headers['Content-Type'] = r.headers.get('Content-Type', 'application/json;charset=UTF-8')\n resp = r.json()\n if resp.get('response', {}).get('result') == 'success':\n return resp['response']['data']\n except:\n log.exception('Failed to get %s' % url)\n return", "def fetchContent(self):\n print 'fetching page by its path: '+ self.path\n uri = '%s?path=%s' % (self.client.MakeContentFeedUri(), self.path)\n # get the content feed\n feed = self.client.GetContentFeed(uri=uri)\n # take out the content\n self.entry = feed.get_webpages()[0]", "def get_com_data(self):\n self.form_url_str()\n if self.__print_url: print self.com_data_full_url\n self.download_json()\n self.get_datalist_fr_json()", "def fetch(self):\n self.genre = \"Review\"\n try:\n if not self.__setSoup():\n log.info(self.log_msg(\"Soup not set,returning false\"))\n return False\n #if not self._getParentPage():\n # log.info(self.log_msg(\"Parent page not found\"))\n while True:\n parent_page_soup = copy.copy(self.soup)\n # log.info(self.log_msg('current uri%s'%parent_page_soup))\n if not self.__addReviews():\n log.info(self.log_msg('fetched all reviews for the url %s'\\\n %self.task.instance_data['uri']))\n \n 
log.info(self.log_msg('Next page%s'%self.currenturi))\n try:\n \n # self.currenturi = self.task.instance_data['uri'].rsplit\\\n # ('/', 1)[0] + '/' + self.soup.find('a', \\\n # title='Go to the next page')['href']\n self.currenturi = 'http://www.phonedog.com' + parent_page_soup.find('a',title='Go to the next page')['href']\n \n if not self.__setSoup():\n log.info(self.log_msg('soup not set for the uri %s'%\\\n self.currenturi))\n break\n except:\n log.info(self.log_msg('Next page not found for the uri %s'%\\\n self.currenturi))\n break\n return True\n except:\n log.exception(self.log_msg(\"Exception in fetch\"))\n return False", "def fetch():\n return True", "def run(self, imap_obj: imaplib.IMAP4):\n func = 'fetch'\n args = self.__fetch_query.build()\n if self.__fetch_query.uids:\n func = 'uid'\n args = ('fetch',) + args\n\n typ, data = getattr(imap_obj, func)(*args)\n self.check_response(typ, data)\n for line, literals in build_imap_response_line(data):\n yield ImapFetchedItem(tokenize_atom_response(line, literals))", "async def hacs_repository_data(hass, connection, msg):\n hacs = get_hacs()\n repo_id = msg.get(\"repository\")\n action = msg.get(\"action\")\n data = msg.get(\"data\")\n\n if repo_id is None:\n return\n\n if action == \"add\":\n repo_id = extract_repository_from_url(repo_id)\n if repo_id is None:\n return\n\n if repo_id in hacs.common.skip:\n hacs.common.skip.remove(repo_id)\n\n if not hacs.get_by_name(repo_id):\n try:\n registration = await register_repository(repo_id, data.lower())\n if registration is not None:\n raise HacsException(registration)\n except (\n Exception,\n BaseException,\n ) as exception: # pylint: disable=broad-except\n hass.bus.async_fire(\n \"hacs/error\",\n {\n \"action\": \"add_repository\",\n \"exception\": str(sys.exc_info()[0].__name__),\n \"message\": str(exception),\n },\n )\n else:\n hass.bus.async_fire(\n \"hacs/error\",\n {\n \"action\": \"add_repository\",\n \"message\": f\"Repository '{repo_id}' exists in the store.\",\n },\n )\n\n repository = hacs.get_by_name(repo_id)\n else:\n repository = hacs.get_by_id(repo_id)\n\n if repository is None:\n hass.bus.async_fire(\"hacs/repository\", {})\n return\n\n _LOGGER.debug(\"Running %s for %s\", action, repository.data.full_name)\n try:\n if action == \"set_state\":\n repository.state = data\n\n elif action == \"set_version\":\n repository.data.selected_tag = data\n await repository.update_repository()\n\n repository.state = None\n\n elif action == \"install\":\n was_installed = repository.data.installed\n repository.data.selected_tag = data\n await repository.update_repository()\n await repository.async_install()\n repository.state = None\n if not was_installed:\n hass.bus.async_fire(\"hacs/reload\", {\"force\": True})\n\n elif action == \"add\":\n repository.state = None\n\n else:\n repository.state = None\n _LOGGER.error(\"WS action '%s' is not valid\", action)\n\n message = None\n except AIOGitHubAPIException as exception:\n message = exception\n except AttributeError as exception:\n message = f\"Could not use repository with ID {repo_id} ({exception})\"\n except (Exception, BaseException) as exception: # pylint: disable=broad-except\n message = exception\n\n if message is not None:\n _LOGGER.error(message)\n hass.bus.async_fire(\"hacs/error\", {\"message\": str(message)})\n\n await hacs.data.async_write()\n connection.send_message(websocket_api.result_message(msg[\"id\"], {}))", "def get_comics(self):\n self.get_json_urls()\n self.check_datastore()\n self.diff_sets()\n 
self.storage_logic()\n\n # Useful if we want to be interactive\n self.state['json'] = self.urls_from_json\n self.state['datastore'] = self.urls_from_datastore\n self.state['difference'] = self.difference\n self.state['json_count'] = len(self.urls_from_json)\n self.state['datastore_count'] = len(self.urls_from_datastore)\n self.state['difference_count'] = len(self.difference)", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def collect(self):\n repos = requests.get('http://{}:{}/api/unstable/dosocs/repos'.format(\n self.config['broker_host'],self.config['broker_port'])).json()\n\n for repo in repos:\n try:\n logger.info(f'Adding Repo Labor Data for Repo: {repo}')\n self.generate_value_data(repo['repo_id'], repo['path'])\n except Exception as e:\n logger.error(f'Error occured for Repo: {repo}')\n logger.exception(e)\n\n self.register_task_completion('value')\n\n # while True:\n # time.sleep(2)\n # logger.info(f'Maintain Queue Empty: {self._maintain_queue.empty()}')\n # logger.info(f'Queue Empty: {self._queue.empty()}')\n # if not self._queue.empty():\n # message = self._queue.get()\n # logger.info(f\"Popped off message from Queue: {message.entry_info}\")\n # self.working_on = \"UPDATE\"\n # elif not self._maintain_queue.empty():\n # message = self._maintain_queue.get()\n # logger.info(f\"Popped off message from Maintain Queue: {message.entry_info}\")\n # self.working_on = \"MAINTAIN\"\n # else:\n # break\n\n # if message.type == 'EXIT':\n # break\n\n # if message.type != 'TASK':\n # raise ValueError(f'{message.type} is not a recognized task type')\n\n # if message.type == 'TASK':\n # try:\n # repos = requests.get('http://{}:{}/api/unstable/dosocs/repos'.format(\n # self.config['broker_host'],self.config['broker_port'])).json()\n\n # for repo in repos:\n # self.generate_value_data(repo['repo_id'], repo['path'])\n\n # self.register_task_completion('value')\n\n # except Exception:\n # # logger.error(\"Worker ran into an error for task: {}\\n\".format(message.entry_info['task']))\n # # logger.error(\"Error encountered: \" + str(e) + \"\\n\")\n # # # traceback.format_exc()\n # # logger.info(\"Notifying broker and logging task failure in database...\\n\")\n\n # logger.exception(f'Worker ran into an error for task {message.entry_info}')\n # self.register_task_failure(message.entry_info['repo_id'],\n # message.entry_info['task']['given']['git_url'])\n\n # # Add to history table\n # task_history = {\n # \"repo_id\": message.entry_info['repo_id'],\n # \"worker\": self.config['id'],\n # \"job_model\": message.entry_info['task']['models'][0],\n # \"oauth_id\": self.config['zombie_id'],\n # \"timestamp\": datetime.datetime.now(),\n # \"status\": \"Error\",\n # \"total_results\": self.results_counter\n # }\n\n # if self.history_id:\n # self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history))\n # else:\n # r = self.helper_db.execute(self.history_table.insert().values(task_history))\n # self.history_id = r.inserted_primary_key[0]\n\n # logger.info(f\"Recorded job error for: {message.entry_info['task']}\")\n\n # # Update job process table\n # updated_job = {\n # \"since_id_str\": 
message.entry_info['repo_id'],\n # \"last_count\": self.results_counter,\n # \"last_run\": datetime.datetime.now(),\n # \"analysis_state\": 0\n # }\n # self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job))\n # logger.info(\"Updated job process for model: \" + message.entry_info['task']['models'][0] + \"\\n\")\n\n # # Reset results counter for next task\n # self.results_counter = 0\n # pass", "def fetch(self):\n\n ftpclient.download(self.ftp_server, self.results_key) # get results from nachi\n\n result = FadeBuilder.deserialize(self.results_key)\n\n if result is BaseException:\n self.set_exception(result)\n else:\n self.set_result(result)\n\n self._state = FadeFuture.FINISHED\n\n return", "def getDetails(self, option=\"Firmware\"):\n\n def get_repo_data(repos, col_num):\n \"\"\"\n Finds 'State', 'Repositories', 'Image Type', 'Source Path', 'In Use' data for all OS Image Repositories and\n 'State', 'Repository Name', 'Source', 'Custom Bundles' for all Firmware/Software Repositories\n :param repos: list of OS or Firmware locators\n :param col_num: 5 for OS and 4 for Firmware, based on number of colons required\n :return: list of data from tables\n \"\"\"\n repos_data = []\n for repo in repos:\n tds = repo.find_elements_by_xpath(\"./td\")\n td_text = []\n for index, td in enumerate(tds):\n if index == 0 and col_num == 4:\n text = td.text\n text = text.split('\\n')\n if len(text) > 1:\n td_text.append(text[1])\n continue\n if index == col_num:\n break\n td_text.append(td.text)\n repos_data.append(td_text)\n return repos_data\n\n def zipped_data(repos_data):\n \"\"\"\n Makes a dictionary out of colon names as a key and data from repositories under that colon as a value\n eg. 
{'In Use': 'False', etc.}\n :param repos_data: list of repository data within list\n :return: list of data as dictionary for each repository\n \"\"\"\n os_col_names = ['State', 'Repositories', 'Image Type', 'Source Path', 'In Use']\n fw_col_names = ['State', 'Repository Name', 'Source', 'Custom Bundles']\n\n repo_data = []\n for repo in repos_data:\n if len(repo) == 4:\n zipped = zip(fw_col_names, repo)\n elif len(repo) == 5:\n zipped = zip(os_col_names, repo)\n repo_data.append(dict(zipped))\n return repo_data\n\n try:\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('repo_tab'))), action=\"CLICK\")\n os_repos = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects('OS_repos'))))\n os_repos_data = get_repo_data(os_repos, col_num=5)\n utility.execLog(\"Able to fetch OS Repositories data: {}\".format(os_repos_data))\n if option == \"OS\":\n utility.execLog('Returning: \"{}\"'.format(zipped_data(os_repos_data)))\n return self.browserObject, True, zipped_data(os_repos_data)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('FW_tab'))), action=\"CLICK\")\n fw_repos = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects('FW_repos'))))\n fw_repos_data = get_repo_data(fw_repos, col_num=4)\n utility.execLog(\"Able to fetch Firmware Repositories data: {}\".format(fw_repos_data))\n if option == \"Firmware\":\n utility.execLog('Returning: \"{}\"'.format(zipped_data(fw_repos_data)))\n return self.browserObject, True, zipped_data(fw_repos_data)\n else:\n data = zipped_data(os_repos_data) + zipped_data(fw_repos_data)\n utility.execLog('Returning: \"{}\"'.format(zipped_data(data)))\n return self.browserObject, True, data\n except Exception as e:\n return self.browserObject, False, \"Unable to read Repositories :: Error -> {}\".format(e)", "async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()", "def __iter__(self) -> Generator[Optional[dict], None, None]:\n data_list, response, result = self.retrieve_data(self.url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n yield None\n return\n\n # yield the first page data\n for data in data_list:\n yield data\n\n while 'next' in response.links.keys():\n next_page = response.links['next']['url']\n\n # Here we don't need to pass in params with the page, or the default params because the url from the headers already has those values\n data_list, response, result = self.retrieve_data(next_page)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n return\n\n for data in data_list:\n yield data", "def fetch(self) -> None:\n try:\n repository = Repo.clone_from(\n self._parsed_url.original_url,\n self._output_dir,\n depth=1,\n no_single_branch=True,\n env={\"GIT_TERMINAL_PROMPT\": \"0\"},\n )\n except Exception:\n raise REANAFetcherError(\n \"Cannot clone the given Git repository. 
Please check that the provided \"\n \"URL is correct and that the repository is publicly accessible.\"\n )\n\n if self._git_ref:\n try:\n repository.remote().fetch(self._git_ref, depth=1)\n repository.git.checkout(self._git_ref)\n except Exception:\n raise REANAFetcherError(\n f'Cannot checkout the given Git reference \"{self._git_ref}\"'\n )\n\n shutil.rmtree(os.path.join(self._output_dir, \".git\"))", "def fetch_self(self):\r\n self.parsed_doc['names'] = self.fetch_candidate_name() \r\n self.parsed_doc['phones'] = self.fetch_phone_numbers() \r\n self.parsed_doc['emails'] = self.fetch_emails() \r\n self.parsed_doc['github'] = self.fetch_github() \r\n self.parsed_doc['linkedin'] = self.fetch_linkedin() \r\n self.parsed_doc['degrees'] = self.fetch_degrees() \r\n self.parsed_doc['skills'] = self.fetch_skills() \r\n self.parsed_doc['education'] = self.fetch_education() \r\n self.parsed_doc['languages'] = self.fetch_languages() \r\n self.parsed_doc['addresses'] = self.fetch_address() \r\n self.parsed_doc['raw_resume'] = self.stringtext", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"git@github.com:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "async def github_repo_info(self, ctx: commands.Context, *repo: str) -> None:\n repo = \"/\".join(repo)\n if repo.count(\"/\") != 1:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The repository should look like `user/reponame` or `user reponame`.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n async with ctx.typing():\n repo_data = await self.fetch_data(f\"{GITHUB_API_URL}/repos/{quote(repo)}\")\n\n # There won't be a message key if this repo exists\n if \"message\" in repo_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The requested repository was not found.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n embed = discord.Embed(\n title=repo_data[\"name\"],\n description=repo_data[\"description\"],\n colour=discord.Colour.blurple(),\n url=repo_data[\"html_url\"]\n )\n\n # If it's a fork, then it will have a parent key\n try:\n parent = repo_data[\"parent\"]\n embed.description += f\"\\n\\nForked from [{parent['full_name']}]({parent['html_url']})\"\n except KeyError:\n log.debug(\"Repository is not a fork.\")\n\n repo_owner = repo_data[\"owner\"]\n\n embed.set_author(\n name=repo_owner[\"login\"],\n url=repo_owner[\"html_url\"],\n icon_url=repo_owner[\"avatar_url\"]\n )\n\n repo_created_at = datetime.strptime(repo_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y\")\n last_pushed = datetime.strptime(repo_data[\"pushed_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y at %H:%M\")\n\n embed.set_footer(\n text=(\n f\"{repo_data['forks_count']} ⑂ \"\n f\"• {repo_data['stargazers_count']} ⭐ \"\n f\"• Created At {repo_created_at} \"\n f\"• Last Commit {last_pushed}\"\n )\n )\n\n await ctx.send(embed=embed)", "def 
fetch_pkgbuild(self):\n\n package_dir = os.path.join(Package.cache_dir, self.pkgbase)\n\n # check if repo has ever been fetched\n if os.path.isdir(package_dir):\n if run([\"git\", \"fetch\"], cwd=package_dir).returncode != 0:\n logging.error(\"git fetch failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git fetch failed in directory {}\".format(package_dir))\n\n head = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n u = run(\n [\"git\", \"rev-parse\", \"@{u}\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n\n # if new sources available\n if head != u:\n reset_return = run(\n [\"git\", \"reset\", \"--hard\", \"HEAD\"],\n stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if reset_return.returncode != 0:\n print(reset_return.stderr)\n logging.error(\"git reset failed in directory {}\".format(package_dir))\n raise InvalidInput(\"git reset failed in directory {}\".format(package_dir))\n\n pull_return = run(\n [\"git\", \"pull\"],\n stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if pull_return.returncode != 0:\n print(pull_return.stderr)\n logging.error(\"git pull failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git pull failed in directory {}\".format(package_dir))\n\n # repo has never been fetched\n else:\n # create package dir\n try:\n os.makedirs(package_dir, mode=0o700, exist_ok=True)\n except OSError:\n logging.error(\"Creating package dir {} failed\".format(package_dir))\n raise InvalidInput(\"Creating package dir {} failed\".format(package_dir))\n\n # clone repo\n if run(\n [\"git\", \"clone\", \"{}/{}.git\".format(AurVars.aur_domain, self.pkgbase)],\n cwd=Package.cache_dir\n ).returncode != 0:\n logging.error(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))\n raise ConnectionProblem(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))", "def Fetch(self, request, global_params=None):\n config = self.GetMethodConfig('Fetch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def fetch(self, folder, source, version, params):\n metadata = []\n rvalue = {\"version\": version, \"metadata\": metadata}\n rcode = 0\n return rcode, rvalue", "def __init__(self, *args, **kwargs):\r\n \r\n self.current_result = dict()\r\n\r\n self.owner = kwargs['owner']\r\n self.resources = kwargs.get('resources', \r\n settings.GITHUB_SETTINGS['GITHUB_SUPPORTED_RESOURCES']\r\n )\r\n\r\n self.page = kwargs.get('page', settings.DEFAULT_PAGE)\r\n self.per_page = kwargs.get('per_page', settings.DEFAULT_PER_PAGE) \r\n \r\n self.repo_list = self._get_repo_list(**kwargs)\r\n\r\n self.repo_done = []\r\n self.current_repo = None", "def fetch(args):\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == 
checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break", "def __do_git_svn_fetch(self):\n logging.info('--- Do Git SVN Fetch ---')\n arguments = []\n if self.options.revision:\n revisions_range = self.options.revision.split(':')\n from_revision = revisions_range[0]\n try:\n to_revision = revisions_range[1]\n except IndexError:\n to_revision = 'HEAD'\n #\n arguments.append('-r')\n arguments.append(f'{from_revision}:{to_revision}')\n #\n if self.options.exclude:\n exclude_prefixes = []\n if self.options.trunk_prefix:\n exclude_prefixes.append(f'{self.options.trunk_prefix}[/]')\n #\n for tags_prefix in self.options.tags_prefixes:\n exclude_prefixes.append(f'{tags_prefix}[/][^/]+[/]')\n #\n for branches_prefix in self.options.branches_prefixes:\n exclude_prefixes.append(f'{branches_prefix}[/][^/]+[/]')\n #\n regex = '^(?:%s)(?:%s)' % (\n '|'.join(exclude_prefixes),\n '|'.join(self.options.exclude))\n arguments.append(f'--ignore-paths={regex}')\n #\n return self.git.svn_fetch(*arguments)", "def fetch(self, remote: str, branch: str) -> str:\n self.__verify_repo_initialized()\n address = heads.get_remote_address(self._env.branchenv, name=remote)\n self._client = HangarClient(envs=self._env, address=address)\n CW = ContentWriter(self._env)\n\n with closing(self._client) as client:\n client: HangarClient\n\n # ----------------- setup / validate operations -------------------\n\n try:\n cHEAD = heads.get_branch_head_commit(self._env.branchenv, branch)\n except ValueError:\n # branch does not exist on local client\n try:\n s_branch = client.fetch_branch_record(branch)\n sHEAD = s_branch.rec.commit\n except grpc.RpcError as rpc_error:\n if rpc_error.code() == grpc.StatusCode.NOT_FOUND:\n # branch does not exist on remote\n logger.error(rpc_error.details())\n raise rpc_error\n else:\n c_bhistory = summarize.list_history(\n self._env.refenv, self._env.branchenv, branch_name=branch)\n try:\n s_branch = client.fetch_branch_record(branch)\n sHEAD = s_branch.rec.commit\n except grpc.RpcError as rpc_error:\n if rpc_error.code() == grpc.StatusCode.NOT_FOUND:\n # branch does not exist on remote\n logger.error(rpc_error.details())\n raise rpc_error\n\n # verify histories are intact and should be synced\n if sHEAD == cHEAD:\n warnings.warn(f'NoOp: {sHEAD} == client HEAD {cHEAD}', UserWarning)\n return branch\n elif sHEAD in c_bhistory['order']:\n warnings.warn(\n f'REJECTED: remote HEAD: {sHEAD} behind local: {cHEAD}', UserWarning)\n return branch\n\n # ------------------- get data ------------------------------------\n\n mCmtResponse = client.fetch_find_missing_commits(branch)\n m_cmts = mCmtResponse.commits\n for commit in tqdm(m_cmts, desc='fetching commit data refs'):\n # Get missing label (metadata) digest & values\n m_labels = set(client.fetch_find_missing_labels(commit))\n for label in m_labels:\n received_hash, labelVal = client.fetch_label(label)\n CW.label(received_hash, labelVal)\n # Get missing data schema digests & values\n mSchemaResponse = client.fetch_find_missing_schemas(commit)\n for schema in mSchemaResponse.schema_digests:\n schema_hash, schemaVal = client.fetch_schema(schema)\n CW.schema(schema_hash, schemaVal)\n # Record missing data hash digests (does not get data itself)\n m_hashes = client.fetch_find_missing_hash_records(commit)\n m_schema_hash_map = defaultdict(list)\n for digest, schema_hash in m_hashes:\n 
m_schema_hash_map[schema_hash].append((digest, schema_hash))\n for schema_hash, received_data in m_schema_hash_map.items():\n CW.data(schema_hash, received_data, backend='50')\n\n # Get missing commit reference specification\n for commit in tqdm(m_cmts, desc='fetching commit spec'):\n cmt, parentVal, specVal, refVal = client.fetch_commit_record(commit)\n CW.commit(cmt, parentVal, specVal, refVal)\n\n # --------------------------- At completion -----------------------\n\n # Update (or create) remote branch pointer with new HEAD commit\n fetchBranchName = f'{remote}/{branch}'\n try:\n heads.create_branch(\n self._env.branchenv, name=fetchBranchName, base_commit=sHEAD)\n except ValueError:\n heads.set_branch_head_commit(\n self._env.branchenv, branch_name=fetchBranchName, commit_hash=sHEAD)\n\n return fetchBranchName", "def fetch_all_repos_info():\n\n def fetch(*args, **kwargs):\n kwargs[\"remote\"].fetch()\n\n repos = ALL_REPOS\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(f\"fetching {repo_name}\")\n _try_for_all_remotes(\n repo, fetch, raise_on_exception=False, stop_on_success=False, verbose=True\n )", "def _git_show(self, path, ref=\"HEAD\"):\n res = requests.get(\n \"/\".join([self.loc, ref, path]),\n auth=HTTPBasicAuth(self.username, self.password)\n )\n\n if res.status_code // 100 != 2:\n return None\n\n if res.headers['Content-Type'] == 'application/json':\n res = json.loads(res.content)\n # cache existence info about all directories shown!\n if path != \"talus/pypi/simple\" and res[\"type\"] == \"listing\":\n self._add_to_cache(path, items=res[\"items\"])\n else:\n res = res.content\n\n return res", "async def fetch(self, query, *args):\n return await self.conn.fetch(query, *args)", "def repo_info(self, attempt=1):\n\n response = self.postman.request('repo_list', page=attempt)\n\n if (response.status_code == requests.codes.ok):\n if (len(response.json()) != 0):\n for repo in response.json():\n self.repo_list.append(repo['name'])\n\n self.repo_info(attempt=attempt + 1)", "def fetch_registry_content(self):\n for registry_name, registry in self.registries.items():\n if not registry.source:\n continue\n registry.get_repositories()", "def fetch_and_display(self, prelude='\\n', json_output=False, interactive=True):\n self.fetch()\n self.display_results(prelude=prelude, json_output=json_output)\n if self._results_filtered:\n printerr('** Enter \"unfilter\" to show similar results DuckDuckGo omitted.')", "async def fetch(self, url=None, method='GET', body=None):\n log.debug('fetching \\n method: [%s] \\n url: %s \\n body: %s',\n method,\n url,\n body)\n if not method:\n method = HttpMethod.GET\n status: int = None\n text: str = None\n if method == HttpMethod.GET:\n async with self._http_session.get(url) as response:\n status = response.status\n text = await response.text()\n elif method == HttpMethod.POST:\n async with self._http_session.post(url, data=body) as response:\n log.debug('fetch POST response: %r', response)\n status = response.status\n text = await response.text()\n else:\n raise NotImplementedError(\n f\"HTTP requst method {method} not implemented yet. 
\"\n \"Contributions welcome!\")\n log.debug('fetch result status: %d, text: %s', status, text)\n return (status, text)", "def fetch_feedstock(repo_dir):\n repo = Repo(repo_dir)\n for remote in repo.remotes:\n try:\n remote.fetch()\n except GitCommandError:\n print(\"Failed to fetch {} from {}.\".format(remote.name, remote.url))", "async def repository(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"repository\"], *args, **kwargs)", "def __call__(self, *args, **kwargs):\n return self.fetch(*args, **kwargs)", "def fetch_content(self, url):\n # log.debug(\"Fetching content from: %s\", url)\n prepare_curl_callback = lambda x: x.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE)\n self.http.fetch(url, self.handle_response, prepare_curl_callback=prepare_curl_callback, auth_username=':')", "def fetch(self, fetch: Fetch):\n self._fetch = fetch", "def pull(self, data):\n required = {'token', 'source'}\n api.validate(data, required)\n token = data['token']\n repo = data['source']\n self.credentials_module.authorize(token)\n result = self.docker_module.pull_image(repo)\n # credentials_module.add_image(token, result['image_id'])\n return result", "def fetch(update, context):\n message = f'fetching data, this should take few seconds... {EMOJI_SLEEP*3}'\n update.message.reply_text(message)\n try:\n start = timer()\n url = execute()\n message = f'data fetched in {round(timer() - start, 1)}s. here is url to spreadsheet: {url}'\n update.message.reply_text(message)\n except Exception as e:\n message = f'there was some error {EMOJI_SCREAM*3}\\nerror message: {e}'\n update.message.reply_text(message)", "def fetch(self, is_dl_forced=False):\n\n self.get_files(is_dl_forced)\n\n return" ]
[ "0.70215297", "0.6724511", "0.6645503", "0.6416693", "0.6366239", "0.6360344", "0.63311994", "0.62839985", "0.61965615", "0.61965615", "0.617245", "0.6151739", "0.6135985", "0.61268026", "0.6066018", "0.6021011", "0.6002441", "0.59988475", "0.5984806", "0.5982984", "0.59599936", "0.5917158", "0.589652", "0.58626413", "0.5841491", "0.58042216", "0.57875717", "0.57679737", "0.5761111", "0.5753168", "0.57479054", "0.57088447", "0.56989425", "0.5672885", "0.56612206", "0.5657971", "0.56241596", "0.55958134", "0.55901194", "0.55831933", "0.5578251", "0.5577531", "0.55558616", "0.55489177", "0.55409616", "0.5513718", "0.5507247", "0.54971063", "0.54664606", "0.546353", "0.5442727", "0.5440967", "0.54244494", "0.5411797", "0.5401929", "0.5399459", "0.5393081", "0.53858846", "0.53813004", "0.5381042", "0.537301", "0.5370726", "0.5354416", "0.5333609", "0.53256077", "0.530027", "0.5290866", "0.5290839", "0.52741337", "0.5260791", "0.5258326", "0.5251545", "0.5248425", "0.52394116", "0.52366114", "0.5207974", "0.5200469", "0.52001625", "0.51993364", "0.5185185", "0.5181206", "0.51742756", "0.5173442", "0.5170658", "0.51688486", "0.51656777", "0.5163865", "0.51582694", "0.5157516", "0.5156223", "0.5155815", "0.5152062", "0.51460356", "0.5142804", "0.5128826", "0.51286346", "0.51126087", "0.51044273", "0.5103553", "0.5093259" ]
0.64857197
3
Create a response model to pass to the presenter
def _create_response_model(self, data): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_response_model_ctor(self):\n return self._response_model_ctor", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json", "def handle_create_response(self, response):\n\n if not self.model._meta['update_from_write'] or not response.content:\n return\n\n try:\n obj = self.obj_from_response(response)\n except ValueError:\n obj = None\n\n self.handle_response(response)\n\n return obj", "def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp", "def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):\n desired_format = self.determine_format(request)\n serialized = self.serialize(request, data, desired_format)\n return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)", "def obj_from_response(self, response):\n\n obj = self.model()\n serializer = self.get_serializer()\n field_data = serializer.deserialize(to_unicode(response.content))\n obj.update_fields(field_data)\n obj._full_url = response.url\n\n return obj", "def get_response(self, request):\n view = self.get_view()\n # Call its view with the request and this model.\n return view(request, flexible_page=self)", "def create_response(result):\n return ControllerResponse(\n response=result,\n status=200,\n mime='application/json',\n jsonize=True,\n )", "def create_response_element(self, **kwargs):\r\n return None", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def create_response_object(self, url_data, service_id, service_version):\n request_dict = {k: v[0] for k, v in url_data}\n\n create_response_object = {\n \"status\": request_dict[\"status\"],\n \"response\": request_dict[\"response\"],\n \"cache_condition\": request_dict.get(\"cache_condition\", \"\"),\n \"request_condition\": request_dict.get(\"request_condition\", \"\"),\n \"name\": request_dict[\"name\"],\n \"version\": service_version,\n \"content\": request_dict[\"content\"],\n \"content_type\": \"text/plain\",\n \"service_id\": service_id\n }\n\n if 'response_object_list' not in self.fastly_cache[service_id]:\n self.fastly_cache[service_id]['response_object_list'] = []\n\n self.fastly_cache[service_id][\n 'response_object_list'].append(create_response_object)\n return create_response_object", "def __init__(self, response):\n self.response = response\n self.object = response['object']\n self.event_id = response['event_id']\n self.created_at = response['created_at']\n self.data = response['data']\n self.request = response['request']\n self.event_type = response['type']\n self.livemode = response['livemode']", "def process_response(self, response: response_domain_model):\n ...", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def to_response(self):\n raise NotImplementedError(\"Must define to_response on 
`%s`\" % self.__class__.__name__)", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self, res):\n self.fromResponseObj(res)", "def __init__(self, res):\n self.fromResponseObj(res)", "def to_response(self, data):\n return self.from_dict(data).to_dict()", "def get_initial_response():\n # Message to the user\n message = {\n 'apiVersion': 'v1.0',\n 'status': '200',\n 'message': 'Flask API - Doubtnut - OPENCV'\n }\n # Making the message looks good\n resp = jsonify(message)\n # Returning the object\n return resp", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _bld_resp(self, status=200, entry_or_list=None):\n resp = pvm_adp.Response('meth', 'path', status, 'reason', {})\n resp.entry = None\n resp.feed = None\n if entry_or_list is None:\n resp.feed = pvm_ent.Feed({}, [])\n else:\n if isinstance(entry_or_list, list):\n resp.feed = pvm_ent.Feed({}, entry_or_list)\n else:\n resp.entry = entry_or_list\n return resp", "def make_response(self):\n params = {\n 'tweet.fields': 'created_at,public_metrics,entities',\n 'expansions': 'author_id',\n 'user.fields': 'description'\n }\n return self.response_limit(params)", "def make_response(self, rv):\n status_or_headers = headers = None\n if isinstance(rv, tuple):\n rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))\n\n if rv is None:\n raise ValueError('View function did not return a response')\n\n if isinstance(status_or_headers, (dict, list)):\n headers, status_or_headers = status_or_headers, None\n\n if not isinstance(rv, self.response_class):\n if isinstance(rv, six.text_type):\n rv = self.response_class(rv, status=status_or_headers)\n else:\n raise ValueError('Content must be a string')\n\n if status_or_headers is not None:\n if isinstance(status_or_headers, six.text_type):\n # FIXME: I'm pretty sure Django's reason_phrase is *just* the\n # 'OK' in '200 OK', whereas Flask allows passing '200 OK'\n rv.reason_phrase = status_or_headers\n else:\n rv.status = status_or_headers\n\n if headers:\n # HttpResponse doesn't take a headers kwarg, so we must set each\n # header manually with rv[header] = value\n if isinstance(headers, dict):\n headers_iter = six.iteritems(headers)\n elif isinstance(headers, list):\n headers_iter = headers\n else:\n raise ValueError('headers must be dict, list, or None')\n\n for header, value in headers_iter:\n rv[header] = value\n\n return rv", "def get_response_data(self):\r\n raise NotImplementedError", "def main_response(self, data):", "def main_response(self, data):", "def __init__(self, **kwargs):\n\n ## Error message\n self.error = ''\n ## Error code\n self.result = 0\n ## Apply passed keyword arguments to the Request object.\n super(ObjectDetectionLoadModels.Response, self).__init__(**kwargs)", "def to_response_data(self) -> typing.Any:\n return None", "def _create_response_detail(self, request, serializer):\n def build_item(source):\n \"\"\"build time data\"\"\"\n return dict(id=source['id'],\n uuid=source['uuid'],\n creation_time=source['creation_time'],\n version=source['version'])\n if self._short_response(request):\n data = serializer.data\n if isinstance(data, (list)):\n detail = 
[build_item(item) for item in data]\n else:\n detail = build_item(data)\n else:\n detail = serializer.data\n return detail", "def create(self):\n response = self.request()\n result_obj = result.Result()\n result_obj.response = response\n\n self.set_result(result_obj, self.execution_type)\n if self.execution_type == \"async\":\n # We set the execution status to 0 as there is no way of knowing the\n # status of async call. Only while reading the response data we will set\n # the actual status code in the result object\n result_obj.set_status_code(int(0))\n return result_obj", "def get_response_for_api(self):\n coll_1 = \"I'm from Telegram\"\n coll_2 = \" Controller Class\"\n result = coll_1 + coll_2\n return {\n 'response': result\n }", "def get_response(self, request):\n data = self.get_data(request)\n outrepr = self.get_outrepr(request)\n return outrepr(data)", "def __init__(self):\n self.responses = StudentResponsePrototype.objects.all()", "def create_route_response(self, ApiId: str, RouteId: str, RouteResponseKey: str, ModelSelectionExpression: str = None, ResponseModels: Dict = None, ResponseParameters: Dict = None) -> Dict:\n pass", "def create_response(self, data):\n return Statuses(**data)", "def get_initial_response():\n # Message to the user\n message = {\n 'apiVersion': 'v1.1',\n 'status': 'Online',\n 'message': 'Welcome to the Space Object Registry API. Refer to the documentation on https://github.com/wdelenclos/messier-registry.',\n 'sources' : sources\n }\n # Making the message looks good\n resp = jsonify(message)\n # Returning the object\n return resp", "def get_response(self):\r\n response = self.response\r\n return response", "def set_model_from_json(self, json):\n self.enable_auto_reply = get_value_from_json(json, \"enableAutoReply\")\n self.response_subject = get_value_from_json(json, \"responseSubject\")\n self.response_body_plain_text = json.get(\"responseBodyPlainText\")\n self.response_body_html = json.get(\"responseBodyHtml\")\n self.restrict_to_contacts = get_value_from_json(json, \"restrictToContacts\")\n self.restrict_to_domain = json.get(\"restrictToDomain\")\n self.start_time = get_value_from_json(json, \"startTime\")\n self.end_time = get_value_from_json(json, \"endTime\")\n return self", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def _instantiateResource(self, res):\n\n\n try:\n pagination = vsdModels.Pagination(**res)\n pagination.validate() #will fail if it doesno't have totalCount\n return pagination\n except:\n resourcetype, oid = self.getResourceTypeAndId(res['selfUrl'])\n if resourcetype == 'objects':\n return vsdModels.APIObject._create(res)\n #e.g FolderLinks\n model = vsdModels.resourceTypes[resourcetype](**res)\n return model", "def __init__(self, response):\n self.response = response\n self.object = response['object']\n self.webhook_endpoint_id = response['webhook_endpoint_id']\n self.created_at = response['created_at']\n self.updated_at = response['updated_at']\n self.status = response['status']\n self.url = response['url']\n self.events = response['events']\n self.livemode = response['livemode']\n self.secret = response['secret']", "def build_response(self, data_list):\n raise NotImplementedError(\"build_response method is not implemented.\")", "def create_model(self):\n pass", "def create_model(self):\n pass", "def requests_response_to_model(response_transformer):\n def response_transform_decorator(original_func):\n \"\"\"\n 
Creates wrapper around a function that returns response\n \"\"\"\n def response_transformer_wrapper(*args, **kwargs):\n \"\"\"\n Log errors and apply transformation in response_handler_func\n \"\"\"\n try:\n response = original_func(*args, **kwargs)\n response.raise_for_status()\n\n except requests.exceptions.HTTPError:\n help_string = ('Please consult the Coursera Data '\n 'Exports Guide for further assistance: '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n if (response.status_code == 403):\n help_string = ('Please authorize this application '\n 'by running:\\n'\n '\\t$ courseraoauth2client config authorize --app manage_research_exports\\n' # noqa\n 'See https://github.com/coursera/courseraoauth2client ' # noqa\n 'for more information on authorization.\\n'\n 'For further assistance, consult the '\n 'Coursera Data Exports Guide '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n logging.error(\n 'Request to {url} with body:\\n\\t{body}\\nreceived response'\n ':\\n\\t{text}\\n'\n '{help_string}\\n'\n .format(url=response.url,\n text=response.text,\n body=(response.request and response.request.body),\n help_string=help_string))\n raise\n\n return response_transformer(response)\n return response_transformer_wrapper\n return response_transform_decorator", "def __init__(self, response, accountId=False):\n self.result = response \n self.accountId = accountId", "def _prepare_response(self, response):\n\n if not isinstance(response, Response):\n return Response(0, response)\n return response", "def MakeModel(self):\n pass", "def __init__(self, response_dict={}):\n self.id = response_dict.get('id')\n self.name = response_dict.get('name')\n self.image_url = response_dict.get('imageUrl')\n self.subtype = response_dict.get('subtype')\n self.supertype = response_dict.get('supertype')\n self.ability = response_dict.get('ability')\n self.hp = response_dict.get('hp')\n self.retreat_cost = response_dict.get('retreatCost')\n self.number = response_dict.get('number')\n self.artist = response_dict.get('artist')\n self.rarity = response_dict.get('rarity')\n self.series = response_dict.get('series')\n self.set = response_dict.get('set')\n self.set_code = response_dict.get('setCode')\n self.types = response_dict.get('types')\n self.attacks = response_dict.get('attacks')\n self.weaknesses = response_dict.get('weaknesses')\n self.resistances = response_dict.get('resistances')", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def from_response(cls, resp):\n try:\n data = resp.json()\n except ValueError:\n data = None\n\n return cls(resp.status_code, data)", "def create(self):\r\n self.require_collection()\r\n request = http.Request('POST', self.get_url(), self.wrap_object({}))\r\n\r\n return request, parsers.parse_json", "def __init__(self, response):\n super(Member, self).__init__(response)", "def response_class(self):\n raise NotImplementedError()", "def create_response(self):\n (paginator, page) = self.build_page()\n\n context = {\n 'query': self.query,\n 'form': self.form,\n 'page': page,\n 'paginator': paginator,\n 'suggestion': None,\n }\n\n if self.results and hasattr(self.results, 'query') and self.results.query.backend.include_spelling:\n context['suggestion'] = self.form.get_suggestion()\n\n context.update(self.extra_context())\n return 
render_to_response(self.template, \\\n context, \\\n context_instance=self.context_class(self.request), \\\n mimetype=\"application/json\")", "def parse_response(self):\n pass", "def decode_response(\n res_model: Type[T],\n resp: Response,\n) -> T:\n if resp.headers.get(HEADER_CONTENT_TYPE) == MSGPACK_CONTENT_TYPE:\n return msgpack.decode(resp.content, type=res_model)\n return parse_raw_as(res_model, resp.text)", "def parse(self, **kwargs):\n\t\treturn self.create(**kwargs)", "def build_simple_model(self, razor_json):\n \n # loop through all the nodes that were returned and take the simple info from them\n for response in razor_json['response']:\n model = {'name': response['@name'],\n 'root_password': response['@root_password'],\n 'current_state': response['@current_state'],\n 'uuid': response['@uuid'],\n 'label': response['@label']\n }\n\n return model", "def get_result_model(cls):\n raise NotImplementedError()", "def gen_response_object(description, media_type, api_type, path):\n\n response = {\n 'description': description,\n 'content': gen_media_type_object(media_type, api_type, path)\n }\n\n return response", "def fromResponseObj(self, res):\n if type(res) is dict and res.get('data') is not None:\n self.message = res.get(\"message\")\n newPickup = res[\"data\"]\n self._id = newPickup[\"_id\"]\n self.puid = newPickup[\"puid\"]\n self.business = newPickup[\"business\"]\n self.businessLocationId = newPickup[\"businessLocationId\"]\n self.scheduledDate = newPickup[\"scheduledDate\"]\n self.scheduledTimeSlot = newPickup[\"scheduledTimeSlot\"]\n self.contactPerson = newPickup[\"contactPerson\"]\n self.createdAt = newPickup[\"createdAt\"]\n self.updatedAt = newPickup[\"updatedAt\"]\n else:\n self.message = str(res)", "def _process_response(self, status_code, response):\n\n formatter = self.formatter\n if not formatter:\n formatter = FormatterFactory(constants.FormatterConst.JSON)\\\n .get_formatter()\n\n response = Response(response, status_code, formatter, self)\n formatted_data = response.formatted_data\n\n if status_code >= constants.ResponseCode.BAD_REQUEST:\n\n if status_code == constants.ResponseCode.NOT_FOUND:\n error_msg = \\\n constants.ErrorConst.NOT_FOUND\n elif constants.ErrorConst.ERROR not in formatted_data:\n error_msg = \\\n constants.ResponseConst.DEFAULT_ERROR_MESSAGE\n else:\n error_msg = formatted_data.get(\n constants.ErrorConst.ERROR, {}\n ).get(\n constants.ErrorConst.DETAIL,\n constants.ErrorConst.UNRECOGNIZED_ERROR\n )\n\n self.debug.error(\n constants.ResponseConst.STATUS_CODE, status_code\n )\n self.debug.error(\n constants.ResponseConst.RESPONSE, response.formatted_data\n )\n raise SendbeeRequestApiException(error_msg)\n else:\n self.debug.ok(constants.ResponseConst.STATUS_CODE, status_code)\n self.debug.ok(constants.ResponseConst.RESPONSE, response.raw_data)\n\n if response.meta.current_page:\n if response.meta.current_page > 1 and len(response.models) == 0:\n raise PaginationException(\n f'Page {response.meta.current_page} has no data'\n )\n\n if response.warning:\n click.secho(\n constants.WarningConst.MESSAGE + response.warning,\n fg='yellow'\n )\n\n if self.single_model_response:\n if response.models:\n return response.models[0]\n else:\n return None\n else:\n return response", "def create() -> TJsonResponse:\n if request.headers['Content-Type'] == 'application/json':\n url = request.json.get('url')\n else:\n url = request.form.get('url')\n if not url:\n return jsonify(error='bad request'), 400\n result = scrape.scrape_meta_for_url(url)\n inserted_id, tags = 
result.get()\n url_hash = encode(inserted_id)\n response_body: Dict[str, Any] = jsonify(hash=url_hash, short_url=f'https://fanlens.io/@{url_hash}', tags=tags)\n return response_body", "def _create_valid_response(self, response, endpoint, kwargs):\n body, status, headers = response\n continuation_token = getattr(body, \"continuation_token\", None)\n total_item_count = getattr(body, \"total_item_count\", None)\n total = getattr(body, \"total\", None)\n more_items_remaining = getattr(body, \"more_items_remaining\", None)\n items = None\n if body is not None:\n items = iter(ItemIterator(self, endpoint, kwargs,\n continuation_token, total_item_count,\n body.items,\n headers.get(Headers.x_request_id, None),\n more_items_remaining or False, None))\n return ValidResponse(status, continuation_token, total_item_count,\n items, headers, total, more_items_remaining)", "def fromResponseObj(self, res):\n if type(res) is dict and res.get('data') is not None:\n self.message = res.get(\"message\")\n data = res[\"data\"]\n self._id = data[\"_id\"]\n self.pickupAddress = data[\"pickupAddress\"]\n self.dropOffAddress = data[\"dropOffAddress\"]\n self.cod = data[\"cod\"]\n self.receiver = data[\"receiver\"]\n self.state = data[\"state\"]\n self.type = data[\"type\"]\n self.trackingNumber = data[\"trackingNumber\"]\n self.holder = data[\"holder\"]\n self.timeline = data[\"timeline\"]\n self.history = data[\"history\"]\n self.creationTimestamp = data[\"creationTimestamp\"]\n else:\n self.message = str(res)", "def reponse(self, data):\n response = self.response\n response.headers['Content-Type'] = 'application/json'\n json.dump(data, response.out)\n return response", "def __call__(self, url, **kwargs):\n self.response = MockResponse()\n self.response.url = url\n self.response.params = kwargs.get('params')\n self.kwargs = kwargs\n return self.response", "def get(self):\n resp = Response()\n return resp", "def to_response_data(self) -> typing.Any:\n v = self.value or {}\n error_code = v.get(\"code\", \"GenericLobotomyError\")\n error_message = v.get(\"message\", \"There was an error.\")\n return {\"Error\": {\"Code\": error_code, \"Message\": error_message}}", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(AlignModelSrvResponse, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.model_aligned is None:\n self.model_aligned = articulation_msgs.msg.ModelMsg()\n if self.data_aligned is None:\n self.data_aligned = articulation_msgs.msg.ModelMsg()\n if self.R is None:\n self.R = [0.,0.,0.,0.,0.,0.,0.,0.,0.]\n if self.T is None:\n self.T = [0.,0.,0.]\n if self.dist_rot is None:\n self.dist_rot = 0.\n if self.dist_trans is None:\n self.dist_trans = 0.\n else:\n self.model_aligned = articulation_msgs.msg.ModelMsg()\n self.data_aligned = articulation_msgs.msg.ModelMsg()\n self.R = [0.,0.,0.,0.,0.,0.,0.,0.,0.]\n self.T = [0.,0.,0.]\n self.dist_rot = 0.\n self.dist_trans = 0.", "def make_response(self, response_type, p_api1=None, p_api2=None, double1=0, double2=0, ptr1=None, size1=0, ptr2=None, size2=0, ptr3=None, size3=0):\n if p_api1:\n p_api1 = id(p_api1)\n if p_api2:\n p_api2 = id(p_api2)\n if ptr1:\n ptr1 = byref(ptr1)\n if size1 == 0:\n size1 = 1\n if ptr2:\n ptr2 = byref(ptr2)\n if size2 == 0:\n size2 = 1\n if ptr3:\n ptr3 = byref(ptr3)\n if size3 == 0:\n size3 = 1\n return self._on_response(response_type.value, p_api1, p_api2, double1, double2, ptr1, size1, ptr2, size2, ptr3, size3)", "def get_response(self, list_item):\n return {\n 
'title': self.get_title(list_item),\n 'link': self.get_link(list_item),\n 'address': self.get_address(list_item),\n 'phone': self.get_phone(list_item),\n 'score': self.get_score(list_item),\n }", "def CreateModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def to_http_response(self) -> HttpResponse:\n response = (\n JsonResponse(self.body)\n if (self.headers or {}).get(\"Content-Type\") == \"application/json\"\n else HttpResponse(self.body)\n )\n response.headers = self.headers\n return response", "def _create_response(raws: [MailWrapper], status=200, mimetpe='application/json') :\n results = []\n for raw in raws:\n results.append(raw.as_dict())\n\n\n return Response(response=json.dumps(results),\n status=status,\n mimetype=mimetpe)", "def _collection_from_response(data):\n return Collection(uuid=UUID(data['uuid']), title=data['title'])", "def api_response():\n\n data = {\n 'hello': 'world',\n 'number': 12\n }\n\n js = json.dumps(data)\n # (@data, @status_code (200 by default), @data_type)\n resp = Response(response=js, status=200, mimetype='application/json')\n\n # Using jsonify to simplify syntax, returns exactly the same flask-Response object\n # from flask import jsonify\n # resp = jsonify(data)\n # resp.status_code = 200\n\n return resp", "def from_response(cls, response):\n\n d = {\n \"headers\": dict(response.getheaders()),\n \"content\": response.read(),\n \"status\": response.status,\n \"reason\": response.reason,\n \"raw_headers\": response.msg.headers,\n \"length\": response.length,\n \"version\": response.version,\n }\n return cls.from_dict(d)", "def P_GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _respond(self, answers=[], authority=[], additional=[], rCode=OK):\n response = Message(rCode=rCode)\n for (section, data) in [(response.answers, answers),\n (response.authority, authority),\n (response.additional, additional)]:\n section.extend([\n RRHeader(name, record.TYPE, getattr(record, 'CLASS', IN),\n payload=record)\n for (name, record) in data])\n return response", "def to_response(self):\n return make_response(self.res, self.status)", "def to_response(self):\n return make_response(self.res, self.status)", "def GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_view(self) -> Optional[dict]:\n catch_keyboard_interrupt()\n\n db = mongodb_db(os.environ['DB_CONNECTION_STRING'])\n if self.model_version == 'latest':\n latest_model_ts = max(db.model.find().distinct('added_on'))\n d = db.model.find_one({'added_on': latest_model_ts})\n else:\n d = db.model.find_one({'_id': self.model_version})\n logger.debug(f'Model version: {d[\"_id\"]}')\n\n labels_vals = list(d['labels'].values())\n if self.method == 'mean':\n count_m = np.mean(labels_vals)\n elif self.method == 'median':\n count_m = np.median(labels_vals)\n else:\n count_m = 10\n\n excluded_labels = os.getenv('EXCLUDE_LABELS')\n if excluded_labels:\n excluded_labels = excluded_labels.split(',')\n else:\n excluded_labels = []\n\n labels_with_few_annos = []\n for k, v in d['labels'].items():\n if count_m > v and k not in excluded_labels:\n labels_with_few_annos.append(k)\n\n headers = self._make_headers()\n\n view_template = {\n 'data': {\n 'type': 'list',\n 'title': 
'',\n 'target': 'tasks',\n 'gridWidth': 4,\n 'columnsWidth': {},\n 'hiddenColumns': {\n 'explore': [\n 'tasks:annotations_results', 'tasks:annotations_ids',\n 'tasks:predictions_score', 'tasks:predictions_results',\n 'tasks:file_upload', 'tasks:created_at',\n 'tasks:updated_at'\n ],\n 'labeling': [\n 'tasks:id', 'tasks:completed_at',\n 'tasks:cancelled_annotations',\n 'tasks:total_predictions', 'tasks:annotators',\n 'tasks:annotations_results', 'tasks:annotations_ids',\n 'tasks:predictions_score',\n 'tasks:predictions_model_versions',\n 'tasks:predictions_results', 'tasks:file_upload',\n 'tasks:created_at', 'tasks:updated_at'\n ]\n },\n 'columnsDisplayType': {},\n 'filters': {\n 'conjunction':\n 'or',\n 'items': [{\n 'filter': 'filter:tasks:predictions_results',\n 'operator': 'equal',\n 'type': 'String',\n 'value': 'placeholder_a'\n }, {\n 'filter': 'filter:tasks:predictions_results',\n 'operator': 'equal',\n 'type': 'String',\n 'value': 'placeholder_b'\n }]\n }\n }\n }\n\n default_view = copy.deepcopy(view_template)\n\n filtered_labels = []\n for label in labels_with_few_annos:\n filtered_labels.append({\n 'filter': 'filter:tasks:predictions_results',\n 'operator': 'contains',\n 'type': 'String',\n 'value': label\n })\n\n view_template['data']['filters']['conjunction'] = 'or' # noqa: PyTypeChecker\n view_template['data']['filters']['items'] = filtered_labels\n view_template['data']['title'] = 'rare_classes'\n\n view_template.update({'project': self.project_id})\n\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views?project={self.project_id}'\n resp = requests.get(url, headers=headers)\n\n default_tab = [\n x for x in resp.json() if x['data']['title'] == 'Default'\n ]\n\n if not default_tab:\n logger.debug(\n f'Creating default view for project {self.project_id}')\n default_view.update({'project': self.project_id})\n default_view['data']['title'] = 'Default'\n default_view['data'].pop('filters')\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views/'\n new_view_resp = requests.post(url,\n headers=headers,\n data=json.dumps(default_view))\n new_default_view = new_view_resp.json()\n logger.debug(f'Response: {new_default_view}')\n\n existing_rare_classes_tab = [\n x for x in resp.json() if x['data']['title'] == 'rare_classes'\n ]\n\n if existing_rare_classes_tab:\n version_col = 'tasks:predictions_model_versions'\n explore_dict = existing_rare_classes_tab[0]['data'][\n 'hiddenColumns']['explore']\n if existing_rare_classes_tab[0]['data']['filters'][\n 'items'] == filtered_labels and (version_col\n in explore_dict):\n logger.debug(\n 'An identical `rare_classes` view already exists for '\n f'project {self.project_id}. Skipping...')\n return\n else:\n logger.debug(\n 'The list of rare classes has changed! 
Replacing...')\n existing_view_id = existing_rare_classes_tab[0]['id']\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views/' \\\n f'{existing_view_id}'\n _ = requests.delete(url, headers=headers)\n\n url = f'{os.environ[\"LS_HOST\"]}/api/dm/views/'\n logger.debug(f'Request: {url} -d {view_template}')\n resp = requests.post(url,\n headers=headers,\n data=json.dumps(view_template))\n new_view = resp.json()\n logger.debug(f'Response: {new_view}')\n return new_view", "def __call__(self, rv):\n raise NotImplementedError(\"You must subclass from ApiResponse.\")", "def create_response(command, metadata=None):\n resp = Message()\n resp.set_field(MessageKeys.response_key, command)\n if metadata is not None:\n for key, value in metadata.items():\n resp.set_field(key, value)\n return resp", "def __call__(self, get_response, request):\n response = self._response_class(*self._args, **self._kwargs)\n return response", "def make_dict(result):\n response = dict()\n response.update(\n {\n 'url': result.get('Url', None),\n 'title': result.get('Title', None),\n 'description': result.get('Description', None),\n 'card_type': 1,\n 'icon_url': None,\n 'provider_icon_url': None,\n 'action_type': 1,\n })\n return response", "def __init__(self, raw_message: Dict):\n self.path = raw_message['request']['path'].decode()\n self.request_headers = {k.decode(): v.decode() for k, v in raw_message['request']['headers']}\n\n request_text = raw_message['request']['content']\n if request_text:\n self.request = json.loads(request_text.decode())\n else:\n self.request = None\n\n self.response_headers = {k.decode(): v.decode() for k, v in raw_message['response']['headers']}\n self.response_type = self.response_headers.get('Content-Type')\n\n # This could get fairly resource intensive if our records get large,\n # but for now we keep things simple - all parsing happens in this class, and we can optimize later\n response_bytes = raw_message['response']['content']\n if self.response_headers.get('Content-Encoding') == 'gzip':\n response_bytes = gzip.decompress(response_bytes)\n\n # Default response is empty string\n if self.response_type == 'application/json':\n # strict=False allows control codes, as used in tidyverse output\n self.response = json.loads(response_bytes.decode(), strict=False)\n elif self.response_type == 'image/png':\n self.response = base64.b64encode(response_bytes).decode('ascii')\n # if we actually wanted to work with the image, could do so like this:\n # img = Image.open(io.BytesIO(response_bytes))\n elif response_bytes:\n self.response = '**unsupported**'\n else:\n self.response = ''", "def _init_(self):\n self.res = {}", "def __init__(self, response_model_ctor, user_sampler, slate_size):\n if not response_model_ctor:\n raise TypeError('response_model_ctor is a required callable')\n\n self._user_sampler = user_sampler\n self._user_state = self._user_sampler.sample_user()\n self._response_model_ctor = response_model_ctor\n self._slate_size = slate_size", "def api_reponses(request):\n if request.method == 'GET':\n reponses = Reponse.objects.all()\n serializer = ReponseSerializer(reponses, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = ReponseSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with 
altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def __init__(self):\n self.result_obj = {}" ]
[ "0.70443594", "0.63880855", "0.61683625", "0.61580884", "0.6102591", "0.61025757", "0.60942143", "0.60272694", "0.59198946", "0.5912622", "0.5899994", "0.5891911", "0.58650625", "0.58380055", "0.58380055", "0.58321315", "0.5831333", "0.582527", "0.582527", "0.58120483", "0.57982874", "0.5794504", "0.57437545", "0.5729958", "0.5720223", "0.5707388", "0.5695699", "0.5690905", "0.5690905", "0.5686904", "0.5682511", "0.5666751", "0.56625676", "0.5610844", "0.5605974", "0.5605932", "0.56043935", "0.5600252", "0.5598792", "0.55967695", "0.5575771", "0.55720425", "0.5563162", "0.5559481", "0.5549979", "0.55426764", "0.55426764", "0.5531702", "0.55297875", "0.55227757", "0.5482013", "0.54818004", "0.54786026", "0.5468498", "0.54513854", "0.5446565", "0.5440872", "0.5432485", "0.54318917", "0.5422866", "0.54225844", "0.5418304", "0.5411179", "0.5405789", "0.5404224", "0.5401997", "0.5396118", "0.5384808", "0.53823", "0.5379359", "0.53701806", "0.53685164", "0.5366596", "0.53642774", "0.5363176", "0.5360816", "0.53571355", "0.53550285", "0.53449047", "0.53367376", "0.5329548", "0.5328233", "0.5325359", "0.53043485", "0.53018045", "0.53018045", "0.53009236", "0.5296096", "0.52935785", "0.52931553", "0.52856237", "0.5283191", "0.52824074", "0.52808666", "0.52800375", "0.52760255", "0.52731854", "0.52720904", "0.52720904", "0.5261582" ]
0.8459162
0
Transform entity into a response model for the presenter
def _make_presentable(self, bookmark):
    return Bookmark(
        id=bookmark.id,
        name=bookmark.name,
        url=bookmark.url,
        date_created=bookmark.date_created
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_response_model(self, data):\n pass", "def return_entity(self, entity, request, environ, start_response,\n response_headers, status=200, status_msg=\"Success\"):\n response_type = self.content_negotiation(\n request, environ, self.EntryTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n # Here's a challenge, we want to pull data through the feed by\n # yielding strings just load in to memory at the moment\n if response_type == \"application/json\":\n data = str('{\"d\":%s}' %\n ''.join(entity.generate_entity_type_in_json()))\n else:\n doc = core.Document(root=core.Entry)\n e = doc.root\n e.set_base(str(self.service_root))\n e.set_value(entity)\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n self.set_etag(entity, response_headers)\n start_response(\"%i %s\" % (status, status_msg), response_headers)\n return [data]", "def obj_from_response(self, response):\n\n obj = self.model()\n serializer = self.get_serializer()\n field_data = serializer.deserialize(to_unicode(response.content))\n obj.update_fields(field_data)\n obj._full_url = response.url\n\n return obj", "def make_response(self):\n params = {\n 'tweet.fields': 'created_at,public_metrics,entities',\n 'expansions': 'author_id',\n 'user.fields': 'description'\n }\n return self.response_limit(params)", "def instance_to_model(self):\n pass", "def convert_Request_to_RequestEntity(request):\n\n result = ResponseEntity()\n try:\n request_entity = RequestEntity()\n request_entity.request_id = request.id\n request_entity.username = request.account.user.username\n request_entity.title = request.title\n request_entity.description = request.description\n request_entity.category = utils.convert_category_str_to_enum_list(request.category)\n request_entity.price = request.price\n request_entity.price_currency = Enum.CURRENCY(int(request.price_currency))\n request_entity.origin_city = Enum.CITY(int(request.origin_city))\n request_entity.origin_address = request.origin_address\n request_entity.destination_city = Enum.CITY(int(request.destination_city))\n request_entity.destination_address = request.destination_address\n request_entity.image_url = utils.convert_string_to_list(request.image_url) # list\n request_entity.thumb_url = utils.convert_string_to_list(request.thumb_url)\n request_entity.expired_date = localtime(request.expired_date)\n request_entity.status = Enum.REQUEST_STATUS(int(request.status))\n request_entity.created_date = localtime(request.created_date)\n request_entity.last_modified_date = request.last_modified_date\n result.success = True\n result.data = request_entity\n except Exception as e:\n print str(e)\n result.success = False\n result.message = str(e)\n finally:\n return result", "def get_model(self, payload):\n return super(BulkEntryTransformer, self).to_model(payload)", "def to_response(self, data):\n return self.from_dict(data).to_dict()", "def get_response_model_ctor(self):\n return self._response_model_ctor", "def convert_RequestEntity_to_Request(request_entity):\n result = ResponseEntity()\n try:\n user = User.objects.get(username=request_entity.username)\n account = Account.objects.get(user=user)\n request = Request.objects.get(id=request_entity.request_id)\n request = copy_field_RequestEntity_to_Request(request_entity, request)\n result.success = True\n 
result.data = request\n except Exception as e:\n print str(e)\n result.success = False\n result.message = str(e)\n finally:\n return result", "def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp", "def to_representation(self, instance):\n response = super().to_representation(instance)\n response['poster'] = { \n 'id': instance.poster.id, \n }\n return response", "def get_entities_handler(response):\n\n if response.status_code != HTTP_CODE_OK:\n raise HttpError('HTTP GET for Entity Set {0} failed with status code {1}'\n .format(self._name, response.status_code), response)\n\n content = response.json()\n\n return content", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def json_to_model(cls, data):\n m = cls.to_model(data)\n m.raw = data\n cls._unlock_unmarshalling(m)\n cls.set_additional_fields(m, data)\n return m", "def to_representation(self, instance):\n response = super().to_representation(instance)\n user_data = UserSerializer(instance.poster, context=self.context).data\n response['poster'] = {\n 'id': user_data['id'],\n 'email': user_data['email'],\n 'profile_picture': user_data['profile_picture'],\n 'first_name': user_data['first_name'],\n 'last_name': user_data['last_name'],\n 'profession': user_data['profession'],\n 'university': user_data['university'],\n 'university_major': user_data['university_major'],\n }\n\n return response", "def deserialize(self, resp):\r\n return self.serializer.deserialize(resp.content, format=resp['Content-Type'])", "def receiver():\n req_entities = request.get_json()\n output = []\n try:\n for entity in req_entities:\n logger.debug(f'Input entity: {json.dumps(entity)}')\n do_query = True # If do_query is missing from the entity we will do the query anyways.\n if 'do_query' in entity: # Check if entity has do_query key\n do_query = entity['do_query']\n else:\n logger.warning(f'Key \"do_query\" is missing from the input entity! Doing query for EVERY entity.')\n\n if do_query:\n handler = getattr(handlers, variables.handler) # Get the handler from env vars.\n entity = handler(databaseConnection, variables, logger, entity) # Append entity with handler.\n logger.debug(f'Output entity: {json.dumps(entity)}')\n output.append(entity)\n except TypeError as e:\n logger.critical('Wrong type gave error: {}'.format(e))\n except Exception as e:\n logger.critical(f'Error when handling entities:\\n{json.dumps(req_entities)}\\nError message:\\n{e}')\n\n # Generate the response\n try:\n return Response(stream_json(output),\n mimetype='application/json')\n except BaseException as e:\n return Response(status=500, response=f\"An error occured during transform of input. 
Error: {e}\")", "def to_response(self):\n raise NotImplementedError(\"Must define to_response on `%s`\" % self.__class__.__name__)", "def get_response(self, request):\n view = self.get_view()\n # Call its view with the request and this model.\n return view(request, flexible_page=self)", "def to_representation(self, instance):\n return instance", "def to_model(self, payload):\n return payload", "def to_entity(cls, item: \"ElasticsearchModel\"):\n item_dict = {}\n\n # Convert the values in ES Model as a dictionary\n values = item.to_dict()\n for field_name in attributes(cls.meta_.entity_cls):\n item_dict[field_name] = values.get(field_name, None)\n\n identifier = None\n if (\n current_domain.config[\"IDENTITY_STRATEGY\"] == IdentityStrategy.UUID.value\n and current_domain.config[\"IDENTITY_TYPE\"] == IdentityType.UUID.value\n and isinstance(item.meta.id, str)\n ):\n identifier = UUID(item.meta.id)\n else:\n identifier = item.meta.id\n\n # Elasticsearch stores identity in a special field `meta.id`.\n # Extract identity from `meta.id` and set identifier\n id_field_name = id_field(cls.meta_.entity_cls).field_name\n item_dict[id_field_name] = identifier\n\n # Set version from document meta, only if `_version` attr is present\n if hasattr(cls.meta_.entity_cls, \"_version\"):\n item_dict[\"_version\"] = item.meta.version\n\n entity_obj = cls.meta_.entity_cls(item_dict)\n\n return entity_obj", "def to_payload(self, model):\n return model", "def process_response(self, response: response_domain_model):\n ...", "def view(self, domain_id: TDomainId) -> TEntityDto:\n raise NotImplementedError()", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def fetch_entity(endpoint, values):\n values['entity'] = Entity.objects.get_or_404(name=values['entity'])", "def presavemodel(self):\n # one thing we need to do here is handle any lazy serialization helpers.\"\"\"\n self.presavemodel_serializationhelpers_updatefields()", "def post(self, entity):\n return '', 200", "def retrieve(self, request, *args, **kwargs):\n _data_id, _format = get_data_and_form(kwargs)\n # pylint: disable=attribute-defined-outside-init\n self.object = instance = self.get_object()\n\n if _format == \"json\" or _format is None or _format == \"debug\":\n return Response(instance.json)\n if _format == \"xml\":\n return Response(instance.xml)\n if _format == \"geojson\":\n return super().retrieve(request, *args, **kwargs)\n if _format == Attachment.OSM:\n serializer = 
self.get_serializer(instance.osm_data.all())\n\n return Response(serializer.data)\n\n raise ParseError(_(f\"'{_format}' format unknown or not implemented!\"))", "def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )", "def decode_response(\n res_model: Type[T],\n resp: Response,\n) -> T:\n if resp.headers.get(HEADER_CONTENT_TYPE) == MSGPACK_CONTENT_TYPE:\n return msgpack.decode(resp.content, type=res_model)\n return parse_raw_as(res_model, resp.text)", "def _to_entity(self):\n obj_dict = copy.deepcopy(vars(self))\n exclude_from_indexes = ()\n try:\n exclude_from_indexes = self._exclude_from_indexes_\n except AttributeError:\n pass\n\n try:\n key = self.key\n except AttributeError:\n key = CustomKey(self.__class__.__name__)\n\n entity = datastore.Entity(key=key, exclude_from_indexes=exclude_from_indexes)\n for dict_key, dict_val in obj_dict.copy().items():\n if dict_val is not None:\n if isinstance(dict_val, BaseModel):\n # If the value is an instance of BaseModel, convert the instance\n # into a \"dotted\" dictionary compatible with NDB entities.\n del obj_dict[dict_key]\n obj_dict.update(dict_val.dottify(dict_key))\n if isinstance(dict_val, list) and len(dict_val) > 0 and isinstance(dict_val[0], BaseModel):\n # if the value is a list of BaseModel objects\n dotted_dict_list = []\n dotted_dict = dict()\n for i, val in enumerate(dict_val):\n dotted_dict_list.append(val.dottify(dict_key))\n for dict_ in dotted_dict_list:\n for k, v in dict_.items():\n temp_val = dotted_dict.get(k) or []\n temp_val.append(v)\n dotted_dict[k] = temp_val\n del obj_dict[dict_key]\n obj_dict.update(dotted_dict)\n else:\n # if the value is False-y i.e. 
the key has not been set in the object,\n # delete the key from the object\n del obj_dict[dict_key]\n entity.update(obj_dict)\n return entity", "def get_outbound_entity(entity):\n cls = entity.__class__\n if cls in [DiasporaPost, DiasporaRequest, DiasporaComment, DiasporaLike, DiasporaProfile, DiasporaRetraction]:\n # Already fine\n return entity\n elif cls == Post:\n return DiasporaPost.from_base(entity)\n elif cls == Comment:\n return DiasporaComment.from_base(entity)\n elif cls == Reaction:\n if entity.reaction == \"like\":\n return DiasporaLike.from_base(entity)\n elif cls == Relationship:\n if entity.relationship in [\"sharing\", \"following\"]:\n # Unfortunately we must send out in both cases since in Diaspora they are the same thing\n return DiasporaRequest.from_base(entity)\n elif cls == Profile:\n return DiasporaProfile.from_base(entity)\n elif cls == Retraction:\n return DiasporaRetraction.from_base(entity)\n raise ValueError(\"Don't know how to convert this base entity to Diaspora protocol entities.\")", "def visit_entity(self, entity):", "def handle_create_response(self, response):\n\n if not self.model._meta['update_from_write'] or not response.content:\n return\n\n try:\n obj = self.obj_from_response(response)\n except ValueError:\n obj = None\n\n self.handle_response(response)\n\n return obj", "def entity(self):\n return self._entity", "def entity(self):\n return self._entity", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json", "def _get(self, request_obj):\n return ResponseData(ActionsSerializer(request_obj).serialize())", "def repr_ui(self):\n from lib.entities import entities_factory\n return (entities_factory.EntitiesFactory().\n convert_obj_repr_from_rest_to_ui(obj=self))", "def to_representation(self, data):\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n\n iterable = data.all() if isinstance(data, models.Manager) else data\n \n if self.parent is None:\n post = [\n self.child.to_representation(item) for item in iterable\n ]\n normalized_dict = OrderedDict()\n normalized_dict[self.child.Meta.model_name] = ReturnList(post, serializer=self)\n result = [normalized_dict]\n for normalized_item in self.child.Meta.normalized_fields:\n if normalized_item in self.instancelist_dict:\n normalized_dict[normalized_item] = \\\n ReturnList(self.make_normalized_item_list(normalized_item), serializer=self)\n return result\n\n if self.field_name in self.child.Meta.normalized_fields:\n result = [ item.id for item in iterable ]\n parent = self.root\n if not self.field_name in parent.instancelist_dict:\n parent.instancelist_dict[self.field_name] = []\n parent.instance_repr_dict[self.field_name] = self.child._readable_fields\n parent.instancelist_dict[self.field_name] = \\\n list(set(parent.instancelist_dict[self.field_name]) | set(iterable))\n else:\n result = [\n self.child.to_representation(item) for item in iterable\n ]\n\n return result", "def return_value(self, entity, value, request, environ, start_response,\n response_headers):\n response_type = self.content_negotiation(\n request, environ, self.ValueTypes)\n if response_type is None:\n 
return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n if response_type == \"application/json\":\n if isinstance(value, edm.Complex):\n if request.version == 2:\n data = '{\"d\":%s}' % core.complex_property_to_json_v2(value)\n else:\n data = '{\"d\":%s}' % core.complex_property_to_json_v1(value)\n else:\n if request.version == 2:\n # the spec goes a bit weird here, tripping up over\n # brackets!\n data = '{\"d\":%s}' % \\\n core.simple_property_to_json_v2(value)\n else:\n data = '{\"d\":{%s}}' % \\\n core.simple_property_to_json_str(value)\n else:\n e = core.Property(None)\n e.set_xmlname((core.ODATA_DATASERVICES_NAMESPACE,\n value.p_def.name))\n doc = core.Document(root=e)\n e.set_value(value)\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n if entity is not None:\n self.set_etag(entity, response_headers)\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return [data]", "def requests_response_to_model(response_transformer):\n def response_transform_decorator(original_func):\n \"\"\"\n Creates wrapper around a function that returns response\n \"\"\"\n def response_transformer_wrapper(*args, **kwargs):\n \"\"\"\n Log errors and apply transformation in response_handler_func\n \"\"\"\n try:\n response = original_func(*args, **kwargs)\n response.raise_for_status()\n\n except requests.exceptions.HTTPError:\n help_string = ('Please consult the Coursera Data '\n 'Exports Guide for further assistance: '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n if (response.status_code == 403):\n help_string = ('Please authorize this application '\n 'by running:\\n'\n '\\t$ courseraoauth2client config authorize --app manage_research_exports\\n' # noqa\n 'See https://github.com/coursera/courseraoauth2client ' # noqa\n 'for more information on authorization.\\n'\n 'For further assistance, consult the '\n 'Coursera Data Exports Guide '\n 'https://partner.coursera.help/hc/en-us/articles/360021121132.') # noqa\n\n logging.error(\n 'Request to {url} with body:\\n\\t{body}\\nreceived response'\n ':\\n\\t{text}\\n'\n '{help_string}\\n'\n .format(url=response.url,\n text=response.text,\n body=(response.request and response.request.body),\n help_string=help_string))\n raise\n\n return response_transformer(response)\n return response_transformer_wrapper\n return response_transform_decorator", "def to_entity(cls, model_obj: \"SqlalchemyModel\"):\n item_dict = {}\n for field_name in attributes(cls.meta_.entity_cls):\n item_dict[field_name] = getattr(model_obj, field_name, None)\n return cls.meta_.entity_cls(item_dict)", "def to_api_object(self):\n visible_to = self.visibleTo.all()\n visible_to_list = []\n for visible in visible_to:\n visible_to_list.append(\"http://\"+visible.author_uid)\n\n # We only get the first 5 comments\n # Get the comments, be aware that comments might not be returned if the foreign author of the comment is unavailable\n comments_list = [comment.to_api_object() for comment in self.comment_set.all().order_by(\"-published\")[:5]]\n filtered_comments_list = [comment for comment in comments_list if 'error' not in comment['author']]\n\n\n\n return {\n \"title\": self.title,\n \"source\": self.source,\n \"origin\": self.origin,\n \"description\": self.description,\n \"contentType\": self.contentType,\n \"content\": self.content,\n \"author\": 
self.author.to_api_object(),\n \"categories\": [category.name for category in self.categories.all()],\n \"count\": len(filtered_comments_list),\n \"size\": self.size,\n \"next\": settings.HOST_URI + \"/posts/\" + str(self.id.hex) + \"/comments\",\n \"comments\": filtered_comments_list,\n \"published\": self.published,\n \"id\": str(self.id.hex),\n \"visibility\": self.visibility,\n \"visibleTo\": visible_to_list,\n \"unlisted\": self.unlisted\n }", "def GetModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def set_model_from_json(self, json):\n self.enable_auto_reply = get_value_from_json(json, \"enableAutoReply\")\n self.response_subject = get_value_from_json(json, \"responseSubject\")\n self.response_body_plain_text = json.get(\"responseBodyPlainText\")\n self.response_body_html = json.get(\"responseBodyHtml\")\n self.restrict_to_contacts = get_value_from_json(json, \"restrictToContacts\")\n self.restrict_to_domain = json.get(\"restrictToDomain\")\n self.start_time = get_value_from_json(json, \"startTime\")\n self.end_time = get_value_from_json(json, \"endTime\")\n return self", "def get_response(self, request):\n data = self.get_data(request)\n outrepr = self.get_outrepr(request)\n return outrepr(data)", "def convert_to_model(self, *args):", "def to_response_data(self) -> typing.Any:\n return None", "def to_http_response(self) -> HttpResponse:\n response = (\n JsonResponse(self.body)\n if (self.headers or {}).get(\"Content-Type\") == \"application/json\"\n else HttpResponse(self.body)\n )\n response.headers = self.headers\n return response", "def GetEntity(self):\n return self.__entity", "def to_model(self, payload):\n if not payload:\n return None\n if self.skip:\n raise SkipField\n\n remote_instance_id = self.id_from_payload(payload)\n model = self.storage.get(\n self.model_class,\n **{'remote_instance.id': remote_instance_id}\n )\n return model", "def _create_response_detail(self, request, serializer):\n def build_item(source):\n \"\"\"build time data\"\"\"\n return dict(id=source['id'],\n uuid=source['uuid'],\n creation_time=source['creation_time'],\n version=source['version'])\n if self._short_response(request):\n data = serializer.data\n if isinstance(data, (list)):\n detail = [build_item(item) for item in data]\n else:\n detail = build_item(data)\n else:\n detail = serializer.data\n return detail", "def from_dict(cls, dikt) -> 'InlineResponse200':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'InlineResponse200':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'InlineResponse200':\n return util.deserialize_model(dikt, cls)", "def to_model(self, payload):\n if self.skip:\n raise SkipField\n\n model = self.get_or_initialize_model(payload)\n model = self.update_model_fields(model, payload)\n return model", "def from_entity(cls, entity) -> \"ElasticsearchModel\":\n item_dict = {}\n for attribute_obj in attributes(cls.meta_.entity_cls).values():\n if isinstance(attribute_obj, Reference):\n item_dict[\n attribute_obj.relation.attribute_name\n ] = attribute_obj.relation.value\n else:\n item_dict[attribute_obj.attribute_name] = getattr(\n entity, attribute_obj.attribute_name\n )\n\n model_obj = cls(**item_dict)\n\n # Elasticsearch stores identity in a special field `meta.id`.\n # Set `meta.id` to the identifier set in entity\n id_field_name = id_field(cls.meta_.entity_cls).field_name\n\n if id_field_name in item_dict:\n model_obj.meta.id = item_dict[id_field_name]\n\n return model_obj", "def 
to_representation(self, data):\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n iterable = data\n\n return [\n self.child.to_representation(item) for item in iterable\n ]", "def me(self, request: Request) -> Response:\n\n serializer = self.get_serializer(instance=request.user)\n return Response(serializer.data)", "def model_to_instance(model):\n pass", "def from_dict(cls, dikt) -> \"InlineResponse201\":\n return util.deserialize_model(dikt, cls)", "def to_representation(self, instance):\n # Load the paginated descendant features\n if instance is None:\n # This happens when OPTIONS is called from browsable API\n return None\n self.add_sources(instance)\n\n ret = OrderedDict()\n fields = self._readable_fields\n\n for field in fields:\n attribute = field.get_attribute(instance)\n assert attribute is not None, (\n 'field.get_attribute return None for instance %s, field %s'\n % (instance, field))\n field_ret = field.to_representation(attribute)\n if isinstance(field, ListSerializer):\n # Wrap lists of related resources in a ReturnList, so that the\n # renderer has access to the serializer\n field_ret = ReturnList(field_ret, serializer=field)\n ret[field.field_name] = field_ret\n\n return ReturnDict(ret, serializer=self)", "def as_entity(self):\n return {\n 'type': self.api_sub_type,\n 'value': unquote(self.unique_id), # type: ignore\n 'id': self._data.get('id'),\n }", "async def get_entity(self):\n if not self.entity and await self.get_input_entity():\n try:\n self._entity =\\\n await self._client.get_entity(self._input_entity)\n except ValueError:\n pass\n\n return self._entity", "def from_entity(cls, e):\n kwargs = {name: e.get(name) for name, prop in cls._properties.items() if prop.is_id} # we need the id value\n obj = cls(**kwargs)\n obj._key = e.key\n\n for name, prop in cls._properties.items(): # set values\n if not prop.is_id:\n obj[name] = e.get(name)\n\n return obj", "def expand(self, model_pcoll):\n return (\n model_pcoll\n | 'Transforming the NDB models into Apache Beam entities' >> (\n beam.Map(job_utils.get_beam_entity_from_ndb_model))\n | 'Writing the NDB models to the datastore' >> (\n self.datastoreio.WriteToDatastore(feconf.OPPIA_PROJECT_ID))\n )", "def get_model_and_view(self):\n uri = self.request.path\n\n #handle datastore page\n page = Page.gql(\"where uri=:1\", uri).get()\n if page is not None and (page.is_public or users.is_current_user_admin()):\n hydrate(page)\n return ModelAndView(view='standard.html',\n model={\n 'page': page,\n 'syntax_list': get_syntax_list([page])\n })\n else:\n #handle static page\n filename = uri[1:] + '.html' if len(uri) > 1 else 'index.html'\n static_page_path = os.path.join(os.path.dirname(__file__), '..', 'content', 'pages', filename)\n if os.path.isfile(static_page_path):\n return ModelAndView(view = static_page_path, model = {})\n\n return self.get_list()", "def return_entity_collection(self, entities, request, environ,\n start_response, response_headers):\n response_type = self.content_negotiation(\n request, environ, self.FeedTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n entities.set_topmax(self.topmax)\n if response_type == \"application/json\":\n data = str('{\"d\":%s}' % ''.join(\n entities.generate_entity_set_in_json(request.version)))\n else:\n # Here's a challenge, we want to pull data through the feed\n # by yielding strings just load in 
to memory at the moment\n f = core.Feed(None, entities)\n doc = core.Document(root=f)\n f.collection = entities\n f.set_base(str(self.service_root))\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (200, \"Success\"), response_headers)\n return [data]", "def record_responsify(serializer, mimetype):\n def view(pid, record, code=200, headers=None, links_factory=None):\n response = current_app.response_class(\n serializer.serialize(pid, record, links_factory=links_factory),\n mimetype=mimetype)\n response.status_code = code\n response.set_etag(str(record.revision_id))\n response.last_modified = record.updated\n if headers is not None:\n response.headers.extend(headers)\n\n if links_factory is not None:\n add_link_header(response, links_factory(pid))\n\n return response\n\n return view", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def from_dict(cls, dikt) -> 'InlineResponse201':\n return util.deserialize_model(dikt, cls)", "def to_dict(self):\n print(\"\\n\\nSTARTING...\")\n ea = db.session.query(entity_assets).filter(entity_assets.c.entity_id == self.id).all()\n print(\"\\n\\nmade it\", ea)\n em = db.session.query(entity_meters).filter(entity_meters.c.entity_id == self.id).all()\n est = db.session.query(entity_statuses).filter(entity_statuses.c.entity_id == self.id).all()\n \n return {\n \"id\": self.id,\n \"user_id\": self.user_id,\n \"creator\": self.user.username,\n \"type\": self.type,\n \"category\": self.category,\n \"title\": self.title,\n \"description\": self.description,\n \"color\": self.color,\n \"icon\": self.icon,\n \"image\": self.image,\n \"created_at\": self.created_at,\n\n \"location_id\": self.location_id,\n \"generated_id\": self.generated_id,\n \n \"assets\": [(a.asset_id, a.quantity) for a in ea],\n \"statuses\": [(s.status_id, s.expiry) for s in est],\n \"meters\": [(m.meter_id, m.points) for m in em],\n \"slots\": [(slot.slot_id, slot.filler_id) for slot in self.entity_slots],\n }", "def get_model(self):\n url = self.resource()\n params = {'data': ''}\n resp = self._client.get(url, params=params)\n\n return resp.text", "def entity(self, elem):\n return data.Entity(self, elem)", "def parse_obj_to_elink_post_response_model(\n self, obj\n ) -> Union[None, ELinkPostResponseModel]:\n try:\n elink_response_record = ELinkPostResponseModel.parse_obj(obj)\n return elink_response_record\n except Exception as e:\n self.logger.error(\n f\"Skipping. 
Error:{e}.\\n Cannot Parse the received Elink Response: \\n{obj} \"\n )\n return None", "def make_response_paginated(paginator: PaginationBase, op: Operation) -> None:\n status_code, item_schema = _find_collection_response(op)\n\n # Switching schema to Output schema\n try:\n new_name = f\"Paged{item_schema.__name__}\"\n except AttributeError:\n new_name = f\"Paged{str(item_schema).replace('.', '_')}\" # typing.Any case\n\n new_schema = type(\n new_name,\n (paginator.Output,),\n {\n \"__annotations__\": {paginator.items_attribute: List[item_schema]}, # type: ignore\n },\n ) # typing: ignore\n\n response = op._create_response_model(new_schema)\n\n # Changing response model to newly created one\n op.response_models[status_code] = response", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "async def _parse_entities(self, responses: SourceResponses) -> Entities:\n entity_attributes = [\n {\n \"url\": str(row[\"URL\"]),\n \"violation_type\": row[\"Violation Type\"],\n \"impact\": row[\"Impact\"],\n \"element\": row[\"DOM Element\"],\n \"page\": re.sub(r\"https?://[^/]+\", \"\", row[\"URL\"]),\n \"description\": row[\"Messages\"],\n \"help\": row[\"Help\"],\n }\n for row in await self.__parse_csv(responses)\n ]\n return Entities(\n Entity(key=md5_hash(\",\".join(str(value) for value in attributes.values())), **attributes)\n for attributes in entity_attributes\n )", "def self(self, request):\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(request.user, context={ \"request\": request })\n return Response(serializer.data)", "def __call__(self, *args, **kwargs):\n return Entity(self, *args, **kwargs)", "def get_result_model(cls):\n raise NotImplementedError()", "def retrieve(self, request, pk=None, **kwargs):\n serializer = self.serializer_class(self.get_object())\n return Response(serializer.data, status=status.HTTP_200_OK)", "def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n\n data = {\n 'header': response_header(msg='Retrieve request successfully processed.',\n username=request.user.username,\n api_status=constants.STATUS_OK),\n 'detail': serializer.data\n }\n\n return Response(data=data, status=status.HTTP_200_OK)", "def to_response(self):\n op_result, remote_branch = self.execute_and_sync()\n if isinstance(op_result, Job):\n return result_response(MigrateProjectCtrl.JOB_RESPONSE_SERIALIZER, op_result)\n\n was_migrated, template_migrated, docker_migrated, messages, warnings, errors = op_result\n\n response = {\n \"messages\": messages,\n \"warnings\": warnings,\n \"errors\": errors,\n \"was_migrated\": was_migrated,\n \"template_migrated\": template_migrated,\n \"docker_migrated\": docker_migrated,\n \"remote_branch\": remote_branch,\n }\n\n return result_response(self.RESPONSE_SERIALIZER, response)", "def GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _bld_resp(self, status=200, entry_or_list=None):\n resp = pvm_adp.Response('meth', 'path', status, 'reason', {})\n resp.entry = None\n resp.feed = None\n if entry_or_list is None:\n resp.feed = pvm_ent.Feed({}, [])\n else:\n if isinstance(entry_or_list, list):\n resp.feed = pvm_ent.Feed({}, entry_or_list)\n else:\n 
resp.entry = entry_or_list\n return resp", "def transformation():\n AutoGluonClassifierService.load_model()\n data = None\n print(f'Request Content Type: {flask.request.content_type}')\n # Convert from CSV to pandas\n if flask.request.content_type == 'application/x-image':\n data = flask.request.data.decode('utf-8')\n tmp_image_path = f'/tmp/{uuid1().hex}.jpg'\n image_bytes = io.BytesIO(data)\n image = Image.open(image_bytes)\n image.save(tmp_image_path)\n else:\n return flask.Response(\n response='This predictor only supports JSON or CSV data. data is preferred.',\n status=415, mimetype='text/plain'\n )\n\n print('Classifying image with {}')\n # Do the prediction\n class_index, class_probability = AutoGluonClassifierService.predict(tmp_image_path)\n prediction = {\n 'ClassIndex': class_index,\n 'PredictionProba': class_probability\n }\n\n return flask.Response(response=json.dumps(prediction), status=200, mimetype='application/json')", "def to_api_data(self):\n raise NotImplementedError()", "def view_ballot_entities_as_map(self, request):\n\n @request.after\n def add_last_modified(response):\n add_last_modified_header(response, self.vote.last_modified)\n\n locale = request.params.get('locale')\n if locale in request.app.locales:\n request.locale = locale\n\n # def translate(text):\n # if locale in request.app.locales:\n # translator = request.app.translations.get(locale)\n # return text.interpolate(translator.gettext(text))\n # return text\n\n return {\n 'model': self,\n 'layout': DefaultLayout(self, request),\n 'type': 'map',\n 'scope': 'entities',\n 'year': self.vote.date.year,\n 'thumbs': 'true',\n 'color_scale': 'rb',\n 'label_left_hand': _(\"Nay\"),\n 'label_right_hand': _(\"Yay\"),\n 'data_url': request.link(self, name='by-entity'),\n }", "def P_GetModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def create_foundation_entity_instance(entity):\n # Get an SDK class and use the configuration generation behaviour to pass in parameters\n sdk_instance = SDK(**(request.get_json() or {}))\n\n try:\n # Entity names are PascalCase, SDK entity methods are snake case.\n method_name = re.sub('(?<!^)(?=[A-Z])', '_', entity).lower()\n entity_class = getattr(sdk_instance.foundation, method_name)\n except AttributeError:\n raise NotFound(\"Entity '%s' which was reformatted to '%s' cannot be found.\" % (entity, method_name))\n\n instance = LockedInstance(\n lock=threading.Lock(),\n instance=entity_class(),\n module=None,\n entity=entity,\n uuid=str(uuid.uuid4().hex),\n created_at=datetime.datetime.utcnow(),\n )\n STORE[instance.uuid] = instance\n response = app.response_class(\n response=json.dumps(serialise_instance(instance)),\n status=201,\n mimetype='application/json'\n )\n return response", "def to_response(self):\n return make_response(self.res, self.status)", "def to_response(self):\n return make_response(self.res, self.status)", "def ng_get(self, request, *args, **kwargs):\r\n return self.build_json_response(self.get_object())", "def _get_latest_model(cls, model, spec):\n if hasattr(model, \"KEY\") and model.KEY is not None:\n spec[\"content\"] = model\n model = cls\n elif hasattr(model, \"STRUCT\"):\n spec[\"content\"] = model.STRUCT\n else:\n # Is a dict\n spec[\"content\"] = model\n spec[\"object\"] = model\n return model", "def _to_storage_model(self, store, result):\n file_dict = result.as_dict()\n file_dict.pop('object_type')\n file_dict['store'] = 
store\n return StorageModel(**file_dict)" ]
[ "0.6371782", "0.6292882", "0.5778668", "0.57751584", "0.5772281", "0.5670169", "0.5642209", "0.56282586", "0.55987585", "0.5566942", "0.5559241", "0.55523074", "0.5506003", "0.5487401", "0.5487401", "0.545934", "0.54588723", "0.5455508", "0.5450392", "0.5440856", "0.5438918", "0.5421224", "0.5420274", "0.53842944", "0.5370289", "0.536751", "0.5328961", "0.53204244", "0.53187895", "0.52719396", "0.52590096", "0.5250983", "0.52446085", "0.5222857", "0.5201646", "0.5168914", "0.51684976", "0.51643836", "0.51606107", "0.51606107", "0.514654", "0.51362807", "0.5130149", "0.51229316", "0.51209617", "0.5120548", "0.51073766", "0.5083693", "0.5064656", "0.506226", "0.5059276", "0.5056196", "0.5051084", "0.5044441", "0.50337726", "0.5031902", "0.5028191", "0.5025125", "0.5025125", "0.5025125", "0.50167584", "0.50034803", "0.49958962", "0.49917474", "0.4986054", "0.49810514", "0.4973489", "0.4967613", "0.49600527", "0.49539682", "0.4953412", "0.495299", "0.49433324", "0.4939898", "0.4936271", "0.49316028", "0.4930922", "0.49285132", "0.49216267", "0.49060264", "0.49051848", "0.49041614", "0.4899206", "0.48702037", "0.4865151", "0.48601088", "0.4858133", "0.48510063", "0.4850181", "0.48499975", "0.48472726", "0.4844284", "0.48404506", "0.48394597", "0.47948727", "0.47912887", "0.47896257", "0.47896257", "0.47861665", "0.4785181", "0.47795177" ]
0.0
-1
Hybrid Log Gamma test
def main():
    for i in range(11):
        i = i / 10
        print('L * 1000', i, L(i) * 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gamma(x):\n return 0.0", "def lgamma(x):\n return 0.0", "def log_gamma_unnormalised_lpdf(x, alpha, beta):\n return alpha * x - beta * tf.exp(x)", "def guestimate_gamma(x_data, time):\n ga0 = np.clip(np.log(max(x_data[0], 0) / (x_data[-1] + 1e-6)) / time[-1], 1e-3, 1e3)\n return ga0", "def log_prob(self):", "def statePosteriors(log_alpha, log_beta):\n\n gamma = np.zeros(log_alpha.shape)\n\n gamma = log_alpha + log_beta\n\n gamma = gamma - logsumexp(log_alpha[-1,:])\n\n return gamma", "def log_Schechter_log(self, logl, alpha, logls, logl0):\n phi = (logl - logls) * (alpha+1) * np.log(10.) - np.power(10., logl-logls)\n lik = phi.copy()\n lik [logl < logl0] = -1e99\n return lik", "def gamma(a,b,c,d):\n g1 = max((c + d) * (1 - b) * b / (c*d * np.math.log(2)), 0.0)\n g2 = max((c + d) * 21**2 / (c*d * (1 - b) * b*a**2), 1.0)\n g = np.math.sqrt(g1 * np.math.log(g2, 2))\n return g", "def gamma(t):\n #s = sum_log([alphas[t,i] + betas[t,i] for i in range(self.S)])\n return [np.exp(alphas[t,i] + betas[t,i] - logZ) for i in range(self.S)]", "def __init__(self):\n GinacFunction.__init__(self, \"log_gamma\", latex_name=r'\\log\\Gamma',\n conversions=dict(mathematica='LogGamma',\n maxima='log_gamma',\n sympy='loggamma'))", "def test_log_con():\n c=14\n assert {'diff':EF.log(c).der, 'value': EF.log(c).val}=={'diff':0, 'value': math.log(c)}", "def utility(consumption_vector, gamma):\n\n if gamma == 1:\n U = np.log(consumption_vector)\n else:\n U = (consumption_vector ** (1 - gamma)) / (1 - gamma)\n return U", "def test_logistic():\n r=np.random.normal(size=20)\n assert np.isclose( ilogistic(logistic(r)),r ).all()", "def logistic(v: float) -> float:\n v = clip(v, -50, 50) # Improve numerical stability.\n return 1 / (1 + math.exp(-v))", "def gamma(gp_link=None,beta=1.):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Log_ex_1()\r\n analytical_mean = False\r\n analytical_variance = False\r\n return noise_models.gamma_noise.Gamma(gp_link,analytical_mean,analytical_variance,beta)", "def gamma(k, z):\n return 1", "def log_prob(self, th):\n\n\t\tif len(th.shape) == 2:\n\t\t\tth0, th1 = th[:,0], th[:,1]\n\t\t\tmask = (th0 > 0.) 
* (th1 > 0.)\n\t\telif len(th.shape) == 1:\n\t\t\tth0, th1 = float(th[0]), float(th[1])\n\t\t\tmask = torch.tensor([th0 > 0., th1 > 0.])\n\t\telse:\n\t\t\traise IndexError(\"This class is only for 2D Gamma prior for GSE model\")\n\t\tth0, th1 = torch.as_tensor(th0), torch.as_tensor(th1)\n\t\tvals = (self.beta_prior.log_prob(th0) + self.gamma_prior.log_prob(th1)).reshape(-1)\n\t\tvals = vals.numpy()\n\t\tvals[~mask] = -float('inf')\n\t\treturn vals", "def kl_gamma(x, y, a=1):\n x = max(x, eps)\n y = max(y, eps)\n return a*(x/y - 1 - log(x/y))", "def logp(value, a):\n # only defined for sum(value) == 1\n res = pt.sum(logpow(value, a - 1) - gammaln(a), axis=-1) + gammaln(pt.sum(a, axis=-1))\n res = pt.switch(\n pt.or_(\n pt.any(pt.lt(value, 0), axis=-1),\n pt.any(pt.gt(value, 1), axis=-1),\n ),\n -np.inf,\n res,\n )\n return check_parameters(\n res,\n a > 0,\n msg=\"a > 0\",\n )", "def schechter(logm, logphi, logmstar, alpha, m_lower=None):\n phi = ((10**logphi) * np.log(10) *\n 10**((logm - logmstar) * (alpha + 1)) *\n np.exp(-10**(logm - logmstar)))\n return phi", "def log1p(x):\n return 0.0", "def expected_log_g(self):\n raise NotImplementedError()\n E_lng = np.zeros_like(self.mf_gamma)\n\n # \\psi(\\sum_{b} \\gamma_b)\n trm2 = psi(self.mf_gamma.sum(axis=2))\n for b in xrange(self.B):\n E_lng[:,:,b] = psi(self.mf_gamma[:,:,b]) - trm2\n\n return E_lng", "def logistic(val):\n return 1.0 / (1.0 + np.exp(-val))", "def logit_link(x):\n\n return 1 / (1 + math.exp(-0.05 * x))\n # return 1 / (1 + math.exp(-0.01 * x))", "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, inline=True)", "def statePosteriors(log_alpha, log_beta):\n return log_alpha + log_beta - logsumexp(log_alpha[-1,:])", "def ga_log(R):\n phiP, t_normal_n, t_perpendicular_n = extractRotorComponents(R)\n return phiP + t_normal_n + t_perpendicular_n", "def test_loguniform(self):\n\n times = np.logspace(-4, -2, 3)\n\n waveObj1 = vrm.waveforms.StepOff(t0=0.0)\n waveObj2 = vrm.waveforms.SquarePulse(delt=0.02)\n\n chi0 = np.array([0.0])\n dchi = np.array([0.01])\n tau1 = np.array([1e-10])\n tau2 = np.array([1e3])\n\n decay1b = (dchi / np.log(tau2 / tau1)) * waveObj2.getCharDecay(\"b\", times)\n decay2b 
= waveObj2.getLogUniformDecay(\"b\", times, chi0, dchi, tau1, tau2)\n\n decay1dbdt = (dchi / np.log(tau2 / tau1)) * waveObj1.getCharDecay(\"dbdt\", times)\n decay2dbdt = waveObj1.getLogUniformDecay(\"dbdt\", times, chi0, dchi, tau1, tau2)\n decay3dbdt = (dchi / np.log(tau2 / tau1)) * waveObj2.getCharDecay(\"dbdt\", times)\n decay4dbdt = waveObj2.getLogUniformDecay(\"dbdt\", times, chi0, dchi, tau1, tau2)\n\n err1 = np.max(np.abs((decay2b - decay1b) / decay1b))\n err2 = np.max(np.abs((decay2dbdt - decay1dbdt) / decay1dbdt))\n err3 = np.max(np.abs((decay4dbdt - decay3dbdt) / decay3dbdt))\n\n self.assertTrue(err1 < 0.01 and err2 < 0.01 and err3 < 0.01)", "def haganLogNormalApprox (y, expiry , F_0 , alpha_0 , beta ,nu , rho ):\n one_beta = 1.0 - beta\n one_betasqr = one_beta * one_beta\n if F_0 != y:\n fK = F_0 * y\n fK_beta = math.pow(fK , one_beta / 2.0)\n log_fK = math.log(F_0 / y)\n z = nu / alpha_0 * fK_beta * log_fK\n x = math.log(( math .sqrt (1.0 - 2.0 * rho * z + z * z) + z - rho) / (1 - rho))\n sigma_l = (alpha_0 / fK_beta / (1.0 + one_betasqr / 24.0 * log_fK * log_fK + math.pow( one_beta * log_fK , 4) / 1920.0) * (z / x))\n sigma_exp = ( one_betasqr / 24.0 * alpha_0 * alpha_0 / fK_beta / fK_beta + 0.25 * rho * beta * nu * alpha_0 / fK_beta + (2.0 - 3.0 * rho * rho) / 24.0 * nu * nu)\n sigma = sigma_l * ( 1.0 + sigma_exp * expiry)\n else:\n f_beta = math.pow(F_0 , one_beta)\n f_two_beta = math.pow(F_0 , (2.0 - 2.0 * beta ))\n sigma = (( alpha_0 / f_beta) * (1.0 + (( one_betasqr / 24.0) * ( alpha_0 * alpha_0 / f_two_beta ) + (0.25 * rho * beta * nu * alpha_0 / f_beta) + (2.0 - 3.0 * rho * rho) / 24.0 * nu * nu) * expiry))\n \n return sigma", "def log_probability(self, samples):\n pass", "def Schechter_log(self, logl, phis, alpha, logls):\n l = np.power(10., logl)\n ls = np.power(10., logls)\n phi = np.log(10) * phis * np.power(l / ls, (alpha + 1)) * np.exp(-l / ls)\n return phi", "def log_like_gamma(params, n):\n alpha, beta = params\n\n # limits:\n # alpha > 0\n # beta > 0\n if alpha <= 0 or beta <= 0:\n return -np.inf\n\n return np.sum(st.gamma.logpdf(n, alpha, scale=1/beta))", "def lognormal(mu, hw, x): \n return np.exp(-( np.log(x/mu) / (0.01*hw) )**2)", "def gammaln(F):\n def compute(value):\n \"\"\"Return log(gamma(value))\n \"\"\"\n if isinstance(value, Number):\n if sc is not None:\n return sc.gammaln(value, dtype='float32')\n else:\n raise ValueError('Numbers are not supported as input if scipy is not installed')\n return F.npx.gammaln(value)\n return compute", "def test_approximate_gamma(self, k):\n mean_column = prior.PriorParams.field_index(\"mean\")\n var_column = prior.PriorParams.field_index(\"var\")\n x = self.priors[self.n][k][mean_column]\n xvar = self.priors[self.n][k][var_column]\n # match mean/variance\n alpha_0, beta_0 = approximate_gamma_mom(x, xvar)\n ck_x = alpha_0 / beta_0\n ck_xvar = alpha_0 / beta_0**2\n assert np.isclose(x, ck_x)\n assert np.isclose(xvar, ck_xvar)\n # match approximate sufficient statistics\n logx, _, _ = approx.approximate_log_moments(x, xvar)\n alpha_1, beta_1 = approx.approximate_gamma_kl(x, logx)\n ck_x = alpha_1 / beta_1\n ck_logx = hypergeo._digamma(alpha_1) - np.log(beta_1)\n assert np.isclose(x, ck_x)\n assert np.isclose(logx, ck_logx)\n # compare KL divergence between strategies\n kl_0 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: scipy.stats.gamma.logpdf(x, alpha_0, scale=1 / beta_0),\n )\n kl_1 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: 
scipy.stats.gamma.logpdf(x, alpha_1, scale=1 / beta_1),\n )\n assert kl_1 < kl_0", "def logistic_function(z):\n\tg = 1/(1+np.exp(-z))\n\t#g = np.exp(z)/(1+np.exp(z))\n\treturn g", "def logg_to_vt_K09(logg):\n return 2.13 - 0.23 * logg", "def gamma(x):\r\n gammax = ((x + 0.055) / 1.055) ** 2.4 if x > 0.04045 else x / 12.92\r\n return gammax", "def general_gamma_binary(uv, wavel, sep, PA, contrast):\n x, y = uv[:, 0], uv[:, 1]\n k = 2 * np.pi / wavel\n beta = mas2rad(sep)\n th = np.deg2rad(PA)\n i2 = 1\n i1 = 1 / contrast\n phi1 = k * x * beta * np.cos(th)/2\n phi2 = k * y * beta * np.sin(th)/2\n out = i1 * np.exp(-1j * (phi1 + phi2))\n out += i2 * np.exp(1j * (phi1 + phi2))\n return out / (i1 + i2)", "def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 *(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return tt_exp,tt_det,tt_f#,Cl[:,1]", "def logistic(mu, hw, x): \n n = np.exp(- ((x-mu)/(.477*hw))**2)\n return (2. * n)/( 1 + n)", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def log2(a):", "def lnprior(theta):\n gamma, A = theta\n\n if 0.0 < gamma and 0.0 < A < 2.0 :\n return ( np.log(1.0/A) + np.log(1.0/(1.0+(gamma**2.0))) )\n\n return -np.inf", "def statePosteriors(log_alpha, log_beta):", "def logistic(x):\n try:\n denom = (1 + math.e ** -x)\n except OverflowError:\n return 0.0\n return 1.0 / denom", "def BernoulliGaussianLoss(mu_kl, log_var_kl) :\n def bgl(x, p) :\n N = K.int_shape(p)[1]\n recon = N*metrics.binary_crossentropy(x, p)\n dkl = -0.5 * K.sum(-K.exp(log_var_kl) - K.square(mu_kl) + 1. + log_var_kl, axis=-1)\n return dkl + recon\n return bgl", "def test_posterior_logprobs(self):\n x = list(product([True, False], repeat=2))\n xs = list(e for e in product(x, repeat=3))\n all_obs = list(o for o in xs\n if all(any(e) and not all(e) for e in o))\n total = logsumexp(list(posterior_logprobs(np.array(obs), self.S, self.A, self.E)[1]\n for obs in all_obs))\n assert_allclose(total, np.log(1))", "def gamma(p, m):\r\n return 1/np.sqrt(1-beta(p, m)**2)", "def f_U_function(W, gamma):\n if W > 0:\n if gamma == 1:\n utility = math.log(W)\n elif gamma >= 0:\n # Minus 1 or not.\n utility = math.pow(W, 1 - gamma) / (1 - gamma)\n else:\n print('The risk aversion parameter should be non-negative numbers.')\n else:\n print('The wealth should be non-negative. Now {}.'.format(W))\n utility = 0\n return utility", "def log10(a):", "def log_marg_like(self, gamma, gamma0, lamb, nu):\n return self.ppi_distribution(gamma, gamma0, lamb).logpdf(self.Y, precision_multiplier=nu)", "def gamma(n, accuracy=0.00000001, stop=None):\n if stop is None:\n stop = -n*math.log(accuracy**(1/n)/n)/math.log(2)\n return defint(lambda x: x**(n-1)*math.e**(-1*x), 0, stop)", "def _normal_log_prob(self, r, scale_log):\n return -(r**2) / 2 - scale_log - self.const", "def solve_gamma(t, old, total):\n\n old, total = np.mean(old), np.mean(total)\n gamma = -1 / t * np.log(old / total)\n\n return gamma", "def g(z):\n return 1. / (1. 
+ np.exp(-z))", "def lognormexp(values, dim=0):\n\n log_denominator = torch.logsumexp(values, dim=dim, keepdim=True)\n # log_numerator = values\n return values - log_denominator", "def lognormexp(values, dim=0):\n\n log_denominator = torch.logsumexp(values, dim=dim, keepdim=True)\n # log_numerator = values\n return values - log_denominator", "def test_returns_same_when_gamma_1(self):\n img = np.array([1.0, 2.0, 3.0])\n\n actual = util.apply_gamma(img, out_min=1, out_max=3, gamma=1)\n npt.assert_array_equal(actual, img)", "def gamma_natural(A):\n return 2.6544188e-12*A", "def statePosteriors(log_alpha, log_beta):\n N = log_alpha.shape[0]\n M = log_alpha.shape[1]\n log_gamma = np.zeros((N, M))\n for n in range(N):\n log_gamma[n, :] = log_alpha[n, :] + log_beta[n, :] - logsumexp(log_alpha[N-1, :])\n\n return log_gamma", "def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))", "def test_g1(self):\n self.setUp()\n theta = self.data.theta\n beta_0, beta_1 = theta[\"beta_0\"], theta[\"beta_1\"]\n gamma_0, gamma_1 = theta[\"gamma_0\"], theta[\"gamma_1\"]\n g1 = self.E_func.g1(self.S, gamma_0, beta_0, gamma_1, beta_1, broadcast=False)\n g1_0_1 = np.exp(np.array([356/3, 335/3, 227/3, 275/3]))\n g1_1_3 = np.exp(np.array([147, 172.5, 145.5, 61.5]))\n np.testing.assert_almost_equal(np.log(g1[0, 0, :, 0]), np.log(g1_0_1))\n np.testing.assert_almost_equal(np.log(g1[0, 1, :, 1]), np.log(g1_1_3))", "def _logprob(self, sample):\n return 0, 0", "def global_loss(bce_loss, mu, log_var):\n kl_divergence = 0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return bce_loss - kl_divergence", "def log_hchg(x, a1, a2, mu1, mu2):\n assert np.alltrue(mu1 > 0) and np.alltrue(mu2 > 0)\n assert np.alltrue(a1 > 0) and np.alltrue(a2 > 0)\n \n out_shp = np.broadcast(x, a1, a2, mu1, mu2).shape\n if out_shp == ():\n out_shp = (1,)\n \n x = np.broadcast_to(x, out_shp).ravel()[:, np.newaxis]\n a1 = np.broadcast_to(a1, out_shp).ravel()[:, np.newaxis]\n a2 = np.broadcast_to(a2, out_shp).ravel()[:, np.newaxis]\n mu1 = np.broadcast_to(mu1, out_shp).ravel()[:, np.newaxis]\n mu2 = np.broadcast_to(mu2, out_shp).ravel()[:, np.newaxis]\n \n j = np.arange(250)\n \n out = j * np.log(mu1 * x) - sp.gammaln(j+1)\n out += log_poch(a1+a2, j) - log_poch(a1, j)\n out += np.log(sp.hyp1f1(a1+a2+j, a2, mu2*(1-x)))\n out = sp.logsumexp(out, axis=1)\n return out.reshape(out_shp) if out.size > 1 else float(out)", "def log10(self):\n return Factor().__build( VarSet(self.v) , np.log10(self.t) )", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def gamma(self, tl):\n\t return self.GAMMA_0*(1. 
+ self.GAMMA_1*(tl - self.TO) + self.GAMMA_2*(tl - self.TO)**2.);", "def test_gamma_basis_hon(self):\n def row_generator():\n return [random.betavariate(0.5, 0.5) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)", "def Gamma_correction(low_frequency, alpha=0.5):\n x_max = np.max(low_frequency)\n x_min = np.min(low_frequency)\n\n phi_x = (low_frequency - x_min) / (x_max - x_min)\n f_x = np.pi * phi_x\n gamma_x = 1.0 + alpha * np.cos(f_x)\n output = x_max * phi_x ** (1.0 / gamma_x)\n return output", "def logbasechange(a,b):\n return np.log(b)/np.log(a)", "def _loglike(self, y, f):\n bincoef = tf.lgamma(self.n + 1) - tf.lgamma(y + 1) \\\n - tf.lgamma(self.n - y + 1)\n ll = bincoef + y * tf.log(pos(f)) + (self.n - y) * tf.log(pos(1 - f))\n return ll", "def log_poisson(k, l):\n return k*np.log(l) -l - gammaln(k+1)", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def LogPrior(cube):\n\tif (cube<0)+(cube>265):\n\t\treturn -np.inf\n\telse:\n\t\treturn np.log10(1./265)", "def sinkhorn_log(mu,nu,c,epsilon, \n options={'niter':1000, 'tau':-0.5, 'rho':np.inf}):\n\n for key,val in zip(['tau','rho','niter'],[-.5,np.inf,500]):\n options.setdefault(key, val)\n rho,tau,niter = options['rho'],options['tau'],options['niter']\n\n lam = rho/(rho+epsilon)\n if rho==np.inf:\n lam=1.0\n\n H1 = np.ones_like(mu)\n H2 = np.ones_like(nu)\n\n ave = lambda tau, u, u1: tau*u+(1-tau)*u1\n\n lse = lambda A: np.log(np.sum(np.exp(A),axis=1))\n M = lambda u,v:(-c+u[:,np.newaxis]@H2[np.newaxis,:] + H1[:,np.newaxis]@v[np.newaxis,:] )/epsilon\n\n # kullback divergence\n H = lambda p: -np.sum( p.flatten()*(np.log(p.flatten()+1e-20)-1) )\n KL = lambda h,p: np.sum( h.flatten()* np.log( h.flatten()/p.flatten() ) - h.flatten()+p.flatten())\n KLd = lambda u,p: np.sum( p.flatten()*( np.exp(-u.flatten()) -1) )\n dotp = lambda x,y: np.sum(x*y); \n\n err,Wprimal,Wdual = [],[],[]\n u = np.zeros_like(mu)\n v = np.zeros_like(nu)\n\n for _ in range(niter):\n u1=u\n u = ave(tau, u, lam*epsilon*np.log(mu) - lam*epsilon*lse( M(u,v) ) + lam*u )\n v = ave(tau, v, lam*epsilon*np.log(nu) - lam*epsilon*lse( M(u,v).T) + lam*v )\n gamma = np.exp(M(u,v))\n\n if rho==np.inf: \n Wprimal.append(dotp(c,gamma) - epsilon*H(gamma))\n Wdual.append( dotp(u,mu) + dotp(v,nu) - epsilon*np.sum(gamma) )\n err.append( np.linalg.norm( np.sum(gamma,axis=1)-mu ) )\n else:\n Wprimal.append( dotp(c,gamma) - epsilon*H(gamma) \\\n + rho*KL(np.sum(gamma,axis=1),mu) \\\n + rho*KL(np.sum(gamma,axis=0),nu) )\n\n Wdual.append( -rho*KLd(u/rho,mu) - rho*KLd(v/rho,nu) \\\n - epsilon*np.sum( gamma))\n err.append(np.linalg.norm(u-u1, ord=1) )\n \n WDistance = Wprimal[-1]+epsilon*H(gamma)\n\n return gamma,Wprimal,Wdual,err,WDistance", "def gauss(x, gamma):\n return 1 / np.sqrt(2*np.pi) / gamma * np.exp(-(x/gamma)**2 / 2)", "def gamma(num: float) -> float:\n if num <= 0:\n raise ValueError(\"math domain error\")\n if num > 171.5:\n raise OverflowError(\"math range error\")\n elif num - int(num) not in (0, 0.5):\n raise NotImplementedError(\"num must be an integer or a half-integer\")\n elif num == 0.5:\n return sqrt(pi)\n else:\n return 1.0 if num == 1 else (num - 1) * gamma(num - 1)", "def logistic(scale, shift, stretch, t):\r\n return scale / (1 + 
np.power(np.e, -1.0*(t - shift )/ stretch))", "def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)", "def Gamma_10(Ea,mu,T):\n return(Gamma(Ea,E0,V)*(1-fermi((Ea-E0),mu,T)))", "def addlogs(a,b):\n \n if a>b:\n return a + np.log(1+np.exp(b-a))\n else:\n return b + np.log(1+np.exp(a-b))", "def logistic(x_0, max_value, midpoint, steepness):\n return max_value / (1 + math.exp(-(x_0 - midpoint) / steepness))", "def lnlike(theta, dtarray, dmagarray, sigmaarray):\n gamma, A = theta\n\n aux=np.sum(np.log(like_one(theta,dtarray,dmagarray,sigmaarray)))\n\n return aux", "def log(x, base=math.e):\n return 0.0", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def logp(value, alpha, K):\n logp = -pt.sum(\n pt.log(\n pt.cumsum(\n value[..., ::-1],\n axis=-1,\n )\n ),\n axis=-1,\n )\n logp += -K * betaln(1, alpha)\n logp += alpha * pt.log(value[..., -1])\n\n logp = pt.switch(\n pt.or_(\n pt.any(\n pt.and_(pt.le(value, 0), pt.ge(value, 1)),\n axis=-1,\n ),\n pt.or_(\n pt.bitwise_not(pt.allclose(value.sum(-1), 1)),\n pt.neq(value.shape[-1], K + 1),\n ),\n ),\n -np.inf,\n logp,\n )\n\n return check_parameters(\n logp,\n alpha > 0,\n K > 0,\n msg=\"alpha > 0, K > 0\",\n )", "def logp(self, x):\n pass", "def logg_to_vt_M08(logg):\n return 2.22 - 0.322 * logg", "def log_normal(x, m, log_v):\n ################################################################################\n # TODO: Modify/complete the code here\n # Compute element-wise log probability of normal and remember to sum over\n # the last dimension\n ################################################################################\n # print(\"q_m\", m.size())\n # print(\"q_v\", v.size())\n const = -0.5 * x.size(-1) * torch.log(2 * torch.tensor(np.pi))\n # print(const.size())\n log_det = -0.5 * torch.sum(log_v, dim=-1)\n # print(\"log_det\", log_det.size())\n log_exp = -0.5 * torch.sum((x - m) ** 2 / (log_v.exp()), dim=-1)\n\n log_prob = const + log_det + log_exp\n\n ################################################################################\n # End of code modification\n ################################################################################\n return log_prob", "def __logprob__(self, cv, vsense):\n return 1.0 / (1.0 + np.exp(-np.dot(cv, vsense)))", "def gamrand(alpha, lmbda):\n # (algorithm 4.33).\n\tif alpha > 1:\n\t\td = alpha - 1 / 3\n\t\tc = 1 / np.sqrt(9 * d)\n\t\tflag = 1\n\n\t\twhile flag:\n\t\t\tZ = np.random.randn()\t\n\n\t\t\tif Z > -1 / c:\n\t\t\t\tV = (1 + c * Z)**3\n\t\t\t\tU = np.random.rand()\n\t\t\t\tflag = np.log(U) > (0.5 * Z**2 + d - d * V + d * np.log(V))\n\n\t\treturn d * V / lmbda\n\n\telse:\n\t\tx = gamrand(alpha + 1, lmbda)\n\t\treturn x * np.random.rand()**(1 / alpha)", "def lorentz(x, x0, gamma): \n return (0.5/pi) * gamma / ((x-x0)**2 + 0.25 * gamma**2)", "def logp(value, nu, mu, scale):\n quaddist, logdet, ok = quaddist_parse(value, mu, scale)\n k = floatX(value.shape[-1])\n\n norm = gammaln((nu + k) / 2.0) - gammaln(nu / 2.0) - 0.5 * k * pt.log(nu * np.pi)\n inner = -(nu + k) / 2.0 * pt.log1p(quaddist / nu)\n res = norm + inner - logdet\n\n return check_parameters(res, ok, nu > 0, msg=\"posdef, nu > 0\")", "def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp", "def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)", "def updateMeanAndVar(X, log_gamma, varianceFloor=5.0):", "def get_gamma(self, conv_op):", "def gamma(flag, S, K, t, r, sigma, q):\n\n b = r-q\n\n 
return numerical_gamma(flag, S, K, t, r, sigma, b, f)" ]
[ "0.6843969", "0.6825065", "0.6679971", "0.66503066", "0.6559735", "0.65503234", "0.65369153", "0.6466907", "0.64395845", "0.6437493", "0.6381946", "0.6371749", "0.63404536", "0.628037", "0.62665665", "0.62616044", "0.62610275", "0.62424546", "0.62359", "0.6202875", "0.6190577", "0.61830497", "0.61651", "0.61577123", "0.6147816", "0.6145877", "0.614216", "0.61370534", "0.6131009", "0.61239326", "0.61080676", "0.61033475", "0.6098961", "0.60968554", "0.6096774", "0.6070641", "0.6062668", "0.6056802", "0.60545343", "0.6052549", "0.60467404", "0.60420835", "0.6041862", "0.6037564", "0.6035142", "0.6030424", "0.60292125", "0.6018121", "0.5999973", "0.59959555", "0.5977107", "0.5972086", "0.5970692", "0.5958464", "0.59563", "0.5947023", "0.59409374", "0.59409374", "0.59320825", "0.591864", "0.59178525", "0.59135914", "0.59080505", "0.59046495", "0.5892051", "0.5890898", "0.58897537", "0.58852303", "0.58852303", "0.5879819", "0.5875789", "0.58709145", "0.5863294", "0.58612555", "0.58550584", "0.58550066", "0.5850033", "0.5849503", "0.5848493", "0.58390284", "0.5836171", "0.58348405", "0.58334005", "0.5832426", "0.58241045", "0.5819991", "0.5811472", "0.580837", "0.58052725", "0.5801374", "0.57938987", "0.5793187", "0.5777514", "0.57723236", "0.5767409", "0.5764495", "0.576281", "0.5758963", "0.5758907", "0.57581824", "0.57569176" ]
0.0
-1
takes in a string of columns and places alternating checkers in those columns, starting with 'X'. For example, call b.setBoard('012345') to see 'X's and 'O's alternate on the bottom row, or b.setBoard('000000') to see them alternate in the left column. moveString must be a string of integers
def setBoard( self, moveString ):
    nextCh = 'X'   # start by playing 'X'
    for colString in moveString:
        col = int(colString)
        if 0 <= col <= self.__width:
            self.addMove(col, nextCh)
        if nextCh == 'X':
            nextCh = 'O'
        else:
            nextCh = 'X'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def set_board(self, move_string):\n next_side = \"X\"\n for col_string in move_string:\n col = int(col_string)\n if col >= 0 and col <= self.width:\n self.add_move(col, next_side)\n if next_side == \"X\":\n next_side = \"O\"\n else:\n next_side = \"X\"", "def setBoard(self, moveString):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X':\n nextCh = 'O'\n else:\n nextCh = 'X'", "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': \n nextCh = 'O'\n else: nextCh = 'X'", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert 
zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 
1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def make_board(board_string):\n\n letters = board_string.split()\n\n board = [\n letters[0:5],\n letters[5:10],\n letters[10:15],\n letters[15:20],\n letters[20:25],\n ]\n\n return board", "def apply_move(b,player,move):\n move = move.strip().lower()\n if len(move)!=2:\n raise Exception(\"Valid move is two characters (e.g. 
A2 or B3)\")\n if move[0] not in COLS:\n move = move[::-1]\n if move[0] not in COLS:\n raise Exception(\"No column spec found\")\n j = COLS.index(move[0])\n i = int(move[1])-1\n if b[i][j] != \" \":\n raise Exception(\"Another move already filled that position\")\n b[i][j] = player", "def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def move(state_int, col, player):\n assert isinstance(state_int, int)\n assert 0 <= col < GAME_COLS\n assert player == PLAYER_BLACK or player == PLAYER_WHITE\n field = decode_binary(state_int)\n assert len(field[col]) < GAME_ROWS\n field[col].append(player)\n # check for victory: the simplest vertical case\n suff = field[col][-COUNT_TO_WIN:]\n won = suff == [player] * COUNT_TO_WIN\n if not won:\n won = _check_won(field, col, 0) or _check_won(field, col, 1) or _check_won(field, col, -1)\n state_new = encode_lists(field)\n return state_new, won", "def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == 
\"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def try_moves(self, moves_set):\n for choice in tuple(moves_set):\n self.game.move(choice)\n self.game.board.create_layout()", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def make_move(board: Connect4Board) -> \"(row, col)\":\r\n\r\n while True:\r\n\r\n try:\r\n\r\n print('\\nPlease Specify your move. Enter the number column of a cell on the board.')\r\n print('-'*85)\r\n \r\n col = Connect4GameUI.move_col(board)\r\n row = Connect4GameUI._get_valid_row(board, col)\r\n print(row,col)\r\n return row, col\r\n\r\n break\r\n\r\n except:\r\n print('\\nInvalid move!!!')\r\n print('Please try it again.')", "def compile_board(self, moves=None) -> List[List[str]]:\n if not moves:\n moves = self.moves\n board = []\n current_line = []\n for itr in range(1, 10):\n current_line.append(\n self.tokens[moves.get(itr, ' ')]\n )\n if itr % 3 == 0 and current_line:\n board.append(current_line)\n current_line = []\n board.append(current_line)\n return board", "def next_move(self, board):\n\n while True:\n\n i = int(input('Enter a column: ' ))\n\n if board.can_add_to(i) == True:\n break\n\n print('Try again!')\n\n self.num_moves += 1\n\n return i", "def addMove(self, col, ox):\n if self.allowsMove(col) == True:\n R = -1\n for row in self.board:\n if row[col] == '':\n R += 1\n else:\n break\n self.board[R][col] = ox", "def makeMove(self, moveStr):\r\n\t\tmoveStr = str(moveStr)\r\n\r\n\t\tmoveUci = self._userParseSanToUci(moveStr)\r\n\t\t# print(moveUci)\r\n\r\n\t\tif moveUci is None:\r\n\t\t\treturn\r\n\r\n\t\tresponse = requests.post(f'https://lichess.org/api/board/game/{self.gameId}/move/{moveUci}', headers=self.authHeader)\r\n\r\n\t\tif response.status_code == 200:\r\n\t\t\tlog.debug('Move Successfully Sent')\r\n\r\n\t\telse:\r\n\t\t\tlog.warning(f'Move Unsuccessfully Sent. 
Status Code: {response.status_code}')", "def askMove(self,posibleMoves):\n print(\"Where will you move?\")\n while True:\n pos = raw_input(\"Type Colum and Row 'CR' Ex:a1 for first column/row: \")\n if len(pos) == 2:\n c = ord(pos[0])-97\n r = int(pos[1])-1\n move = c+r*8\n if move in posibleMoves:\n return move\n print(\"Invalid move, try again\")\n return", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def test_valid_move():\n\n board = Board()\n\n # a col outside the width of the board should be false\n assert board.valid_move(board.get_grid_size()[1] + 1) is False\n\n # only positive cols should be considered for a move\n assert board.valid_move(-2) is False\n\n # since board is empty all cols should have moves\n for i in range(board.get_grid_size()[1]):\n assert board.valid_move(i) is True\n\n # if a col is full no move can be made\n for i in range(board.get_grid_size()[1]):\n if i % 2 == 0:\n board.move(board.P1, 0)\n else:\n board.move(board.P2, 0)\n\n \"\"\"\n board now 
looks like this...\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|O|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|X|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|O|-|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|X|-|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|O|-|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|X|-|-|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n\n \"\"\"\n assert board.valid_move(0) is False", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def test_valid_moves(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 5\n move_choice = 5\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((3, 5), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x K . .\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 6\n move_choice = 6\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((2, 7), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . K\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 3\n move_choice = 3\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((1, 5), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . K . .\n . . . . . x x S\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . 
.\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 2\n move_choice = 2\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((2, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . x x S . .\n . . . K . x x S\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # reset board\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 7\n move_choice = 7\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((4, 5), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x x . .\n . . . . . K . .\n . . . . . . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 1\n move_choice = 1\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((6, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x x . .\n . . . . . S . .\n . . . . . x . .\n . . . . K x . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n #\n # set move choice 4\n move_choice = 4\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((4, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S x x . .\n . . . K x S . .\n . . . . x x . .\n . . . . S x . .\n . . . . . . . 
.\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)", "def human_go(self, board):\r\n coord_pattern = re.compile(\"[0-{}]$\".format(board.shape[1]))\r\n print(\"Enter Column and press enter.\")\r\n input_str = input(\"(from 0-6)\\n\")\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n col = int(input_str)\r\n if board[0][col] != 0:\r\n print(\"That column is already full, please try again\")\r\n self.human_go()\r\n else:\r\n for row in board[::-1]:\r\n if row[col] == 0:\r\n row[col] = -1\r\n return board", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def test_check_move_with_valid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] + [\" \"] * 5,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 3)\n self.assertTrue(valid)", "def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board", "def move(x, y, direction, board):\n\n piece_at_xy = starter.get_piece(x, y, board); # Getting necessary pieces\n\n assert piece_at_xy != '*', \"Error in swipe logic\"; # Logical debug case\n valid_direction = (direction == \"left\" or\n direction == \"right\" or\n direction == \"up\" or\n direction == \"down\");\n assert valid_direction, \"Invalid direction passed in\"; # Logical debug case\n\n # The new x and y for the current piece (adjacent's current position) are stored alongside adjacent (fewer ifs + redundant code)\n if direction == \"left\":\n adjacent = (starter.get_piece(x - 1, y, board), x - 1, y);\n elif direction == \"right\":\n adjacent = (starter.get_piece(x + 1, y, board), x + 1, y);\n elif direction == \"up\":\n adjacent = (starter.get_piece(x, y - 1, board), x, y - 1);\n elif direction == \"down\":\n adjacent = (starter.get_piece(x, y + 1, board), x, y + 1);\n\n if adjacent[0] == None: # Edge of the board case (no action taken)\n return False;\n\n elif piece_at_xy != 
adjacent[0] and adjacent[0] != '*': # Can't combine two numbers case (no action taken)\n return False;\n\n elif adjacent[0] == '*': # Empty spot adjacent case (recursive movement in direction)\n starter.place_piece('*', x, y, board);\n starter.place_piece(piece_at_xy, adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n elif piece_at_xy == adjacent[0]: # Adjacent same numbers case (combine them)\n starter.place_piece('*', x, y, board);\n starter.place_piece(str(int(adjacent[0]) * 2), adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n else:\n # Logical debug case\n assert False, \"No way you should be in here. Error in move logic\";\n\n return False;", "def test_move_knight_illegally(self):\n self.c.board = [[(0, 0) for i in range(8)] for i in range(8)]\n for piece in [('N', True), ('N', False)]:\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n dests = [col + row for col in 'abcdefgh' for row in '12345678']\n for dest in dests:\n if dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n continue\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def _ai_move(self):\n move = self.AI_MOVES[self.game_board.get_string_board()][0]\n self.game_board.move_pieces(start=move[\"start\"], end=move[\"end\"])\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self.selected_move = -1\n\n self._sync_gui()", "def apply_move(board_state, move, side):\n move_x, move_y = move\n\n def get_tuples():\n for x in range(len(board_state)):\n if move_x == x:\n temp = list(board_state[x])\n temp[move_y] = side\n yield tuple(temp)\n else:\n yield board_state[x]\n\n return tuple(get_tuples())", "def make_move(self, column):\r\n trans_board = numpy.transpose(self.__board[::1]) # transpose the\r\n # board so that columns are now arrays\r\n if 0 not in trans_board[column] or self.get_winner() or column >= \\\r\n self.BOARD_COLUMNS or column < 0:\r\n # column is full, illegal or the game is already finished\r\n return self.ILLEGAL_MOVE # exception?\r\n else:\r\n reversed_col = list(reversed(trans_board[column]))\r\n for hole in reversed_col:\r\n if hole == 0:\r\n row_i = self.BOARD_ROWS - 1 - reversed_col.index(hole)\r\n self.__board[row_i][column] = self.__cur_player\r\n winner = self.get_winner()\r\n if winner: # is not none\r\n return winner\r\n self.__switch_player()", "def test_string(self):\n expected_empty_board_string = ' | | \\n---------\\n | | \\n---------\\n | | '\n self.assertEqual(str(self.game), expected_empty_board_string)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERX\n expected_x_board_string = 'X | X | X\\n---------\\nX | X | X\\n---------\\nX | X | X'\n self.assertEqual(str(self.game), expected_x_board_string)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game._board[row][col] = PLAYERO\n expected_o_board_string = 'O | O | O\\n---------\\nO | O | O\\n---------\\nO | O | O'\n self.assertEqual(str(self.game), expected_o_board_string)", "def next_move(self, board):\r\n lc = [x for x in range(board.width) if board.can_add_to(x)]\r\n column = random.choice(lc)\r\n self.num_moves += 1\r\n return column", "def place_piece(self, move, piece):\n self.totMoves+=1\n print(move)\n if len(move) > 1:\n self.board[move[1][0]][move[1][1]] = ' '\n self.board[move[0][0]][move[0][1]] = piece", "def move(self, 
row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def move(self, board):\n # first, make your turn:\n currentState = board[self.x,self.y]\n turnDir = self.rule[(currentState + 1) % len(self.rule)]\n self.turn( int(turnDir) )\n # next, let's change this cell's state:\n if currentState >= len(self.rule) - 1:\n board[self.x,self.y] = 0\n else:\n board[self.x,self.y] = currentState + 1\n # and let's move:\n offsets = self.nextPositionOffset() # based on x, y, and dir\n self.x, self.y = board.move(self.x, self.y, offsets[0], offsets[1])", "def next_move(board, player):\n \n move_row = \"move\"\n move_column = \"move\"\n\n while not move_row.isnumeric():\n move_row = input(\"{}, pick row to place your {}. > \".format(player.name, player.char))\n while not move_column.isnumeric(): \n move_column = input(\"Pick column in row {} to place your {}. > \".format(move_row, player.char))\n\n move_row = int(move_row)\n move_column = int(move_column)\n\n move = Move(player, (move_row, move_column))\n \n # Check if move is out of bounds\n if (move_row >= len(board.current_board) or\n move_column >= len(board.current_board)):\n print(\"Move out of bounds. Choose a valid move.\")\n return board\n\n # Check if space is already used\n if board.current_board[move_row][move_column] != \"-\":\n print(\"Spot already played. 
Pick an unused space.\")\n return board\n\n board.last_move = player.name\n board.add_move(move)\n\n return board", "def make_move(self, board: Board) -> int:\n\n move = input()\n move = int(move)\n\n while move not in board.get_valid_moves():\n print(\"That is not a valid move\")\n move = input()\n move = int(move)\n\n return move", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def add_move(self, move):\n \n self.current_board[move.position[0]][move.position[1]] = move.player.char", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == 
self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == 
self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def set_board(board):", "def place_piece(self, move, piece):\n if len(move) > 1:\n self.board[move[1][0]][move[1][1]] = ' '\n self.board[move[0][0]][move[0][1]] = piece", "def apply_move(self, move):\n if self.check_move(move=move):\n self.board_list[move] = self.current_player.marker # changes value in the board to player which is either X or O\n self.moves_made += str(move) # keeps track of all moves\n return True\n else:\n return False", "def display_board(board_state):\n\n if type(board_state) != str:\n raise TypeError('Given board input must be String')\n\n if len(board_state) != 9:\n raise Exception(\"Input board string length is not 9\")\n\n counter = 0\n # print()\n for position in board_state:\n counter += 1\n if counter % 3 == 0:\n \n if counter != 9:\n paddingString = \"\\n---------\\n\"\n else:\n paddingString = ''\n else:\n paddingString = \" | \"\n\n if position.isnumeric():\n print(\" \", end=paddingString)\n\n else:\n print(position, end=paddingString)\n\n print(\"\\n\\n\")", "def place_piece(self, move, piece):\r\n if len(move) > 1:\r\n self.board[move[1][0]][move[1][1]] = ' '\r\n self.board[move[0][0]][move[0][1]] = piece", "def play_move(board, move):\n\tboard_copy = list(board)\n\n\tboard_copy[move] = 'o'\n\treturn ''.join(board_copy)", "def update_board(self, move):\n #new_move equals the gird with selection(Which is the players input)\n new_move = self.grid[move]\n\n # check if column selected by player is full if the first index (top) has a game piece\n if new_move[0] != \" \" :\n return True\n\n # this will get the correct column and add the player's move\n # subtract player column selection by 1 to select correct column\n adjustment = -1\n while new_move[adjustment] != \" \":\n adjustment -= 1\n\n # update the grid with the selected column by the player\n new_move[adjustment] = self.playing_player[1]\n return False", "def validate_move(board: list, character: list, direction: str) -> bool:\n max_x_y_coordinates = board[-1]\n valid_options = []\n if character[1] < max_x_y_coordinates[0]:\n valid_options.append(\"d\")\n if character[1] > 0:\n valid_options.append(\"a\")\n if character[0] < max_x_y_coordinates[1]:\n valid_options.append(\"s\")\n if character[0] > 0:\n valid_options.append(\"w\")\n if direction in valid_options:\n return True\n else:\n return False", "def test_move_knight_legally(self):\n self.c.board[6] = [(0, 0) for i in range(8)]\n for piece in [('N', True), ('N', False)]:\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n for dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n self.groups['dest'] = dest\n self.assertEqual(self.c._knight_evaluator(self.groups), (4, 4))", "def random_move(board):\n\tpossible_moves = []\n\tboard_copy = list(board)\n\n\tfor count, player in enumerate(board):\n\t\tif player == ' ':\n\t\t\tpossible_moves.append(count)\n\n\tif len(possible_moves) != 0:\n\t\tmove = random.choice(possible_moves)\n\t\tboard_copy[move] = 'o'\n\n\t\treturn ''.join(board_copy)\n\t\n\telse:\n\t\treturn board", "def move(self, row, column, piece):\n\n if row < 0 or row >= self._dimension 
or column < 0 or column >= self._dimension or self._board[row][column] != ' ':\n print('Move cannot be made')\n return False\n else:\n self._board[row][column] = piece\n self._number_of_moves += 1", "def make_move(self, move, player, board):\r\n #nBoard = board.copy()\r\n board[move] = player\r\n for d in core.DIRECTIONS:\r\n if self.find_bracket(move, player, board, d)!=None:\r\n self.make_flips(move, player, board, d)\r\n return board", "def move_in_a_corner(self, board):\n return self.choose_random_move_from_list(board, [\n board.letters[0] + board.numbers[0],\n board.letters[0] + board.numbers[board.size - 1],\n board.letters[board.size - 1] + board.numbers[0],\n board.letters[board.size - 1] + board.numbers[board.size - 1]])", "def test_check_move_with_barely_valid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 5 + [\" \"],\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertTrue(valid)", "def execute_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n board.set_player_perspective(player)\n \n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n dest_spike_index = fields_to_move - 1\n board.remove_checker_from_bar()\n else:\n dest_spike_index = spike_index + fields_to_move\n board.pop_player_checker(spike_index)\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board\n\n board.push_player_checker(dest_spike_index)\n\n return board", "def is_valid_move(board,row,col,digit):\n if(is_valid_sudoku_board(board)==False):\n return False\n if row>8 or row<0 or col>8 or col<0 or digit<1 or digit>9:#checks that the row, col, digit is valid\n return False\n if board[row][col]!=0:#checks that you are trying to insert your digit to a valid place\n return False\n for i in range(len(board)):\n if board[row][i]==digit:#checks that your digit isn't in the row\n return False\n if board[i][col]==digit:#checks that your digit isn't in the col\n return False\n for srow in range(3*(row//3),3*(row//3)+3):\n for scol in range(3*(col//3),3*(col//3)+3): \n if board[srow][scol]==digit: #checks that your digit isn't in the block\n return False\n return True", "def moveable(self, board):\n # horizontal\n if self.direction == \"horizontal\":\n # the position to which the car wants to move is either 1 more or 1 less column wise\n right = self.get_cols()[1] + self.size - 1\n left = self.get_cols()[0] - 1\n # check if right or left is out of the boards margins \n if right > board.width_height:\n move_left = board.positions[self.get_rows()[0]][left]\n move_right = None\n elif left < 0:\n move_right = board.positions[self.get_rows()[0]][right]\n move_left = None\n else: \n move_right = board.positions[self.get_rows()[0]][right]\n move_left = board.positions[self.get_rows()[0]][left]\n\n # try to move left and right\n if move_right == \"x\" and move_left == \"x\":\n return \"leftright\"\n elif move_right == \"x\":\n return \"right\"\n elif move_left == \"x\":\n return \"left\"\n else: \n return \"none\"\n \n # vertical\n else:\n up = self.get_rows()[0] - 1\n #print(up)\n down = self.get_rows()[1] + self.size - 1\n # check if up or down is out of the boards margins \n if up < 0:\n # no room on the board for upward movement\n move_down = board.positions[down][self.get_cols()[0]]\n move_up = None\n elif down > board.width_height:\n # no room on the board for downward movement\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = None\n else:\n # both up and down are possible 
positions on the board\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = board.positions[down][self.get_cols()[0]]\n\n # try to move up and down\n if move_down == \"x\" and move_up == \"x\":\n return \"updown\"\n elif move_up == \"x\":\n return \"up\"\n elif move_down == \"x\":\n return \"down\"\n else: \n return \"none\"", "def move(direction: str, board : list) -> list:\n board_length = len(board)\n x, y = find_empty_space(board)\n \n increment_x = 0 \n increment_y = 0\n\n if direction == Direction.Up:\n increment_x, increment_y = Coordinate.Up.value\n elif direction == Direction.Down:\n increment_x, increment_y = Coordinate.Down.value\n elif direction == Direction.Left:\n increment_x, increment_y = Coordinate.Left.value\n elif direction == Direction.Right:\n increment_x, increment_y = Coordinate.Right.value\n\n x_new = x + increment_x\n y_new = y + increment_y\n\n is_valid = is_valid_move(x_new, y_new, board_length)\n\n if is_valid: \n temp = board[x][y]\n board[x][y] = board[x_new][y_new]\n board[x_new][y_new] = temp\n return board\n return None", "def moved_board(board):\n return legal_move_on(board=board).map(\n lambda (start, end): board.move(start=start, end=end),\n )", "def attack(self, somerow, somecol):\n valid_move = True\n for i in range(self.size):\n if self.is_valid_position(somerow, i):\n if self.board[somerow][i] != \"0\":\n #checks the same row\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(i, somecol):\n if self.board[i][somecol] != \"0\":\n #checks the same column\n valid_move = False \n for i in range(self.size):\n if self.is_valid_position(somerow+i, somecol+i):\n if self.board[somerow+i][somecol+i] != \"0\":\n #checks diagonal\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow+i, somecol-i):\n if self.board[somerow+i][somecol-i] != \"0\":\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow-i, somecol+i):\n if self.board[somerow-i][somecol+i] != \"0\":\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow-i, somecol-i):\n if self.board[somerow-i][somecol-i] != \"0\":\n valid_move = False\n return valid_move", "def test_str(self):\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n assert str(board) == 'O|X| \\n |X| \\n | | \\n'", "def human_move(self):\n move = -1\n while move < 1 or move > self.BOARD.COLUMNS:\n try:\n move = input(\"{}: Choose a column>>> \".format(self.NAME))\n\n for i in self.QUIT:\n if str(move) == i:\n return None\n\n move = int(move)\n\n except KeyboardInterrupt:\n exit(0)\n except ValueError:\n pass\n if self.PIECE_COUNT <= 0:\n # cannot do anything\n self.STATE == Spectator.State.INACTIVE\n return None\n else:\n return move", "def legalMoves(self):\n moves = []\n indexOfZero = self.tiles.index(0)\n \n if indexOfZero == 0:\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 1:\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 2:\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 3:\n moves.append('Up')\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 4:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 5:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 6:\n moves.append('Up')\n moves.append('Right')\n elif indexOfZero == 7:\n 
moves.append('Up')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 8:\n moves.append('Up')\n moves.append('Left')\n else:\n print('something wrong with board')\n return moves", "def parse_move_to_square(self, uci_move: str):\n chars = utils.split_string_to_chars(uci_move)\n square_from = ''.join(chars[0] + chars[1])\n square_to = ''.join(chars[2] + chars[3])\n return square_from, square_to", "def is_valid_move(board, picked_column):\n if picked_column < 0 or picked_column >= len(board[0]):\n return False\n for row in range(len(board)):\n if board[row][picked_column] == 0:\n return True\n return False", "def move(self, row, column, symbol):\n game_state = self.determine_game_state()\n if game_state not in (GameState.GAME_NOT_STARTED, GameState.GAME_IN_PROGRESS):\n return MoveResults.MOVE_INVALID\n\n # check for initial move\n if self.board == BLANK_BOARD and symbol == O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # check for invalid row and column\n if row < 0 or row > 2 or column < 0 or column > 2:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece is valid\n if symbol != X_SYMBOL and symbol != O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece isn't moving out of turn\n x_moves = self.board.count(X_SYMBOL)\n o_moves = self.board.count(O_SYMBOL)\n if symbol == X_SYMBOL and x_moves > o_moves:\n return MoveResults.MOVE_INVALID\n elif symbol == O_SYMBOL and o_moves >= x_moves:\n # note that x always goes first.\n return MoveResults.MOVE_INVALID \n\n # figure out position.\n position = (3 * row) + column\n\n # make sure there's not already a piece there.\n if self.board[position] != EMPTY_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n self.board = self.board[:position] + symbol + self.board[position+1:] \n return MoveResults.MOVE_VALID", "def update_board(board: Board, move: Move) -> Board:\n old_position = move[0]\n new_position = move[1]\n character = board[old_position[0]][old_position[1]]\n board = change_position(board, new_position, character)\n board = clear_position(board, old_position)\n \n return board", "def valid_move(board, row, col):\n return board[row][col] == '-'", "def move_x(self, row, column):\n\n #returns false if game has already been won\n if self._game_state != \"UNFINISHED\":\n return False\n\n # checks if x tries to move out of bounds\n if row not in range(8) or column not in range(8):\n return False\n\n # returns false/invalid move if x tries to move more than one row at a time or\n # non diagonal\n if (row - self._current_x_row) > 1 or (column - self._current_x_column) > 1 or (\n self._current_x_row - row) > 1 or (self._current_x_column - column) > 1:\n return False\n\n if self._current_x_column == column:\n return False\n\n if self._current_x_row == row:\n return False\n\n if \"o\" in self._board[row][column]:\n return False\n\n #places x in the specified row and column if the move is legal\n else:\n self._board[self._current_x_row].remove(\"x\")\n self._board[self._current_x_row].append(\"\")\n self._board[row][column] = \"x\"\n self._current_x_row = row\n self._current_x_column = column\n self._current_row += 1\n self._lower_right = (self._current_x_row + 1, self._current_x_column + 1)\n self._lower_left = (self._current_x_row + 1, self._current_x_column - 1)\n self._upper_right = (self._current_x_row - 1, self._current_x_column + 1)\n self._upper_left = (self._current_x_row - 1, self._current_x_column - 1)\n self._row1 = (\n self._board[0][0],\n self._board[1][0],\n self._board[2][0],\n 
self._board[3][0],\n self._board[4][0],\n self._board[5][0],\n self._board[6][0],\n self._board[7][0])\n\n self._row7 = (\n self._board[0][7],\n self._board[1][7],\n self._board[2][7],\n self._board[3][7],\n self._board[4][7],\n self._board[5][7],\n self._board[6][7],\n self._board[7][7])\n\n\n # checks if four \"o\" pieces surrounds x, if so, then x has no more moves and o wins\n if \"x\" not in self._board[7]:\n if \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._lower_left[0]][\n self._lower_left[1]] and \"o\" in self._board[self._upper_right[0]][\n self._upper_right[1]] and \"o\" in \\\n self._board[self._upper_left[0]][self._upper_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the last column and o pieces surrounds x, x loses\n if \"x\" in self._row7 and \"o\" in self._board[self._lower_left[0]][self._lower_left[1]] and \"o\" in \\\n self._board[self._upper_left[0]][self._upper_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the first row and o surrounds x, x loses\n if \"x\" in self._board[0] and \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._lower_left[0]][self._lower_left[1]]:\n self._game_state = \"O_WON\"\n\n # checks if x is in the first column and o pieces surrounds x, x loses\n if \"x\" in self._row1 and \"o\" in self._board[self._lower_right[0]][self._lower_right[1]] and \"o\" in \\\n self._board[self._upper_right[0]][self._upper_right[1]]:\n self._game_state = \"O_WON\"\n\n # winning condition for \"x\" piece upon reaching last row\n if \"x\" in self._board[7]:\n self._game_state = \"X_WON\"\n\n return True", "def set_move(x, y, player, xi, yi):\r\n if valid_move(x, y):\r\n board[x][y] = player\r\n board[xi][yi] = 0\r\n return True\r\n else:\r\n return False", "def update_board(self,move, _testing : bool = True ) -> bool :\r\n\r\n temp = self.board.copy()\r\n self.count = 0\r\n\r\n for direction in DIRECTIONS:\r\n self.update_direction(move,direction)\r\n\r\n if self.count == 0:\r\n self.board = temp\r\n return False\r\n else:\r\n if _testing:\r\n self.board = temp\r\n else:\r\n self.board[move[0],move[1]] = self.turn\r\n return True", "def get_valid_moves(x, y, path):\n \n valids = \"\"\n \n # First compute the hash\n digest = md5(passcode + path).hexdigest()\n \n # Check Up\n if y != 0 and digest[0] in \"bcdef\":\n valids += 'U'\n \n # Check Down\n if y != 3 and digest[1] in \"bcdef\":\n valids += 'D'\n \n # Check Left\n if x != 0 and digest[2] in \"bcdef\":\n valids += 'L'\n \n # Check Right\n if x != 3 and digest[3] in \"bcdef\":\n valids += 'R'\n \n return valids", "def opponent_move(self, move):\n # validate input\n if len(move) > 1:\n source_row = move[1][0]\n source_col = move[1][1]\n if source_row != None and self.board[source_row][source_col] != self.opp:\n self.print_board()\n print(move)\n raise Exception(\"You don't have a piece there!\")\n if abs(source_row - move[0][0]) > 1 or abs(source_col - move[0][1]) > 1:\n self.print_board()\n print(move)\n raise Exception('Illegal move: Can only move to an adjacent space')\n if self.board[move[0][0]][move[0][1]] != ' ':\n raise Exception(\"Illegal move detected\")\n # make move\n self.place_piece(move, self.opp)", "def parse_move(move):\n if not (len(move) == 2):\n return None, None\n try:\n row = ord(move[0].upper()) - 65\n col = int(move[1])\n except:\n return None, None\n return row, col", "def make_move(board, position, player):\n # only valid moves are passed in here\n 
board[position-1] = player", "def _EAN_coords_to_board_coords(EAN_move: str) -> (int, int):\n assert EAN_move[0] in \"abcdefgh\" and EAN_move[1] in \"12345678\", \"failed to get \" + EAN_move\n\n\n col = ord(EAN_move[0]) - ord('a')\n row = 8 - int(EAN_move[1])\n return row, col", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def win(s):\r\n\r\n # check across\r\n for i in range(3):\r\n if board[0 + 3 * i] == board[1 + 3 * i] == board[2 + 3 * i] == s:\r\n board[0 + 3 * i] = board[1 + 3 * i] = board[2 + 3 * i] = '#'\r\n return True\r\n\r\n # check down\r\n for i in range(3):\r\n if board[i] == board[i + 3] == board[i + 6] == s:\r\n board[i] = board[i + 3] = board[i + 6] = '#'\r\n return True\r\n\r\n # check diagonal right\r\n if board[0] == board[4] == board[8] == s:\r\n board[0] = board[4] = board[8] = '#'\r\n return True\r\n\r\n # check diagonal left\r\n if board[6] == board[4] == board[2] == s:\r\n board[6] = board[4] = board[2] = '#'\r\n return True\r\n\r\n return False", "def make_move(board, move, ch):\n board[move['row']][move['col']] = ch\n \n winner = board_winner(board)\n \n if winner is not None:\n return True, winner\n \n if not board_has_move(board):\n return True, None\n \n return False, None", "def calculate_move(board):\n valid_columns = []\n for col in range(board.columncount()):\n try:\n _ok = validate_column(board, col)\n valid_columns.append(col)\n except InvalidColumn:\n pass\n final_col = int(random.choice(valid_columns))\n return final_col", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 
1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def possible_moves(state_int):\n assert isinstance(state_int, int)\n field = decode_binary(state_int)\n return [idx for idx, col in enumerate(field) if len(col) < GAME_ROWS]", "def valid_moves(board):\n return [i for i, x in enumerate(board) if x == ' ']", "def move(self, direction=0):\n moves = [\n [3, 1],\n [2, 2],\n [1, 3],\n [0, 0]\n ]\n\n self._rotate(moves[direction][0])\n\n for row in range(4):\n r = [i for i in self.map[row] if i != 0]\n\n r_result = []\n while(len(r)):\n num = r.pop(0)\n if len(r) and num == r[0]:\n num += r.pop(0)\n # TODO: Do a 2048 check here to see if the player won?\n # this might not be the best place because we could use\n # this method to run tests to see if the player has any valid moves\n r_result.append(num)\n \n self.map[row] = r_result + [0]*(4-len(r_result))\n\n self._add_random_piece()\n\n self._rotate(moves[direction][1])\n self.print_map()", "def human_go(self, board):\r\n coord_pattern = re.compile(\r\n \"[0-{}],[0-{}]\".format(board.shape[0], board.shape[1])\r\n )\r\n print(\"Enter Coordinates of your go then press enter.\")\r\n input_str = input(\"(space seperated, 0-2 with origin in top left)\\n\")\r\n\r\n if not coord_pattern.match(input_str):\r\n print(\"That is not in the right format, please try again...\")\r\n return self.human_go(board)\r\n else:\r\n y, x = [int(coord) for coord in input_str.split(\",\")]\r\n if board[x][y] != 0:\r\n print(\"That square is already taken, please try again\")\r\n self.human_go()\r\n else:\r\n board[x][y] = -1\r\n return board", "def next_move(self,board):\r\n avail_col = [i for i in range(board.width) if board.can_add_to(i) == True]\r\n col = random.choice(avail_col)\r\n self.num_moves += 1\r\n return col", "def make_move(move, player, board):\n board[move] = player\n for d in DIRECTIONS:\n Othello.make_flips(move, player, board, d)\n return board", "def str_to_move(self, string):\n if not string.strip().isdigit():\n return -1\n\n return int(string.strip())" ]
[ "0.83378243", "0.8336306", "0.83127975", "0.82836413", "0.722551", "0.7191393", "0.7191393", "0.7191393", "0.7188351", "0.7188351", "0.71681666", "0.71523356", "0.61226887", "0.6058321", "0.582009", "0.5713693", "0.56568706", "0.5623423", "0.5570228", "0.556483", "0.5557871", "0.5519342", "0.5497061", "0.5493463", "0.54759836", "0.5464171", "0.54581976", "0.545712", "0.5456884", "0.5454254", "0.5445326", "0.5437252", "0.54272425", "0.5424985", "0.53890276", "0.53829145", "0.5353596", "0.53491324", "0.5345275", "0.53387934", "0.5302614", "0.5297399", "0.5289441", "0.5287411", "0.52738214", "0.52641624", "0.52553475", "0.5245241", "0.5243925", "0.5243294", "0.52406424", "0.52385485", "0.52297306", "0.52273476", "0.5224559", "0.5223264", "0.52228737", "0.5220709", "0.52078795", "0.52053964", "0.52032167", "0.51988465", "0.519362", "0.5189018", "0.51835793", "0.5179422", "0.5174582", "0.51591307", "0.51540256", "0.51422143", "0.5140733", "0.51389986", "0.513638", "0.51359004", "0.5135754", "0.5134323", "0.5131066", "0.5127598", "0.5126251", "0.51262325", "0.5112547", "0.51121855", "0.51081705", "0.51046216", "0.51040643", "0.5103414", "0.5101376", "0.5097974", "0.50946414", "0.5090383", "0.508933", "0.50857025", "0.50780207", "0.5073065", "0.50624293", "0.50545734", "0.50490236", "0.50484407", "0.50425166", "0.5040598" ]
0.83451086
0
Checks if AutoML can be loaded from a folder
def _check_can_load(self):
    if self.results_path is not None:
        # Dir exists and can be loaded
        if os.path.exists(self.results_path) and os.path.exists(
            os.path.join(self.results_path, "params.json")
        ):
            self.load(self.results_path)
            self._results_path = self.results_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_load(cls, filename):\n return False", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def is_valid_animation(path, verbose=True):\n try:\n if \"idle\" in os.listdir(path) or \"transition\" in os.listdir(path):\n return True\n else:\n if verbose:\n print(path, \"is not a valid animation folder! It needs an /idle or /transition folder!\")\n return False\n except:\n return False", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def autoload(self):\n\t\tpath = self.world.config[\"plugin\"][\"path\"]\n\t\tif not self.load_glob(path):\n\t\t\treturn False\n\t\tif not self.check_deps():\n\t\t\treturn False\n\t\treturn True", "def __isValidXMLResourcesFolder(self, folder):\n tablesInFolder = filter(lambda f: os.path.isdir(os.path.join(folder, f)),\n os.listdir(folder))\n containedInRequiredTables = map(lambda f: f in self.__requiredTables,tablesInFolder)\n return (True if len(containedInRequiredTables)>0 else False)", "def isMayaFile(potentialMayaFile):\n\n pass", "def check_if_exists(self): \r\n dir_name = os.path.dirname(os.path.abspath(__file__))\r\n fucntion_dir = os.path.join(dir_name, 'openfaas', self.name)\r\n if not os.path.isdir(fucntion_dir):\r\n raise ValueError(\r\n f\"Function name `{self.name}` provided does not exist.\")\r\n self.yaml_path = os.path.join(fucntion_dir, f\"{self.name}.yml\")\r\n return True", "def __is_file_eligible_to_scan(cls, path_to_test):\n return path_to_test.endswith(\".md\")", "def check_if_anim_exist(name, ext=vext, figpath=figpath):\n return not(os.path.isfile(format_filename(name, ext, figpath)))", "def __check_in_autonotes_dir():\n if not os.path.isfile('master.tex'):\n cli.log.error(f'I can\\'t find a {emph(\"master.tex\")} file, '\n 'are you inside an autonotes directory?')\n exit(3)", "def __contains__(self, name):\n return (self.model_dir / (str(name) + '.pkl')).exists()", "def check_loader(self, dt):\n if EVENTS['FILE_PATH'] and EVENTS['CAN_WRITE']:\n self.editor.load_file(EVENTS['FILE_PATH'])\n EVENTS['CAN_WRITE'] = False", "def isLoaded(self,fileName):\n return mwIniFile.isLoaded(fileName)", "def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... 
>\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"", "def is_file_exist(self):\n return os.path.isfile(os.path.join(self.output_path, 'amr_corpus_ext.pickle'))", "def has_file(self, doc):\n return len(doc.package.files) != 0", "def is_resource(self, path):\n # type: (Text) -> bool\n raise FileNotFoundError", "def is_app_dir(path):\n try:\n find_app_yamls(path)\n return True\n except ValueError:\n return False", "def load(self):\n return True", "def has_annotations(filepath):\n return filepath.endswith('.ll') and '[#uses=' in open(filepath).read()", "def isLoaded(self,modFile):\n return (modFile in self.loadFiles)", "def _is_azureml_available() -> bool:\n if importlib.util.find_spec(\"azureml\") is None:\n return False\n if importlib.util.find_spec(\"azureml.core\") is None:\n return False\n return importlib.util.find_spec(\"azureml.core.run\") is not None", "def __check_exists(self):\n\n return os.path.exists(os.path.join(self.__path, 'train_images_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'train_labels_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'test_images_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'test_labels_tensor.pt'))", "def detect(self, path):\n valid = False\n path = pathlib.Path(path)\n # basic check for suffix\n try:\n if path.suffix == self.suffix:\n valid = True\n except ValueError:\n pass\n\n # advanced check with \"detect\"\n if valid and \"detect\" in self.recipe:\n fdetect = self.recipe[\"detect\"]\n valid = fdetect(path)\n\n return valid", "def test_load_file(self):\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version))\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version))", "def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):\n meta_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.meta\".format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.index\".format(step.get_name())))\n\n return meta_exists and index_exists", "def isVideoFolder():", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)", "def in_maya():\n return \"maya.bin\" in sys.argv[0]", "def is_model(folder: Text) -> bool:\n\n model_identifier_filename = 'MLmodel'\n\n if folder.startswith('gs://'):\n\n bucket, *model_folder_path_parts = folder.strip('gs://').split('/')\n model_folder_path = '/'.join(model_folder_path_parts)\n client = storage.Client()\n bucket = client.get_bucket(bucket)\n model_blob = bucket.blob(os.path.join(model_folder_path, model_identifier_filename))\n\n return model_blob.exists()\n\n if os.path.exists(folder) and os.path.isdir(folder):\n return model_identifier_filename in os.listdir(folder)\n\n return False", "def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False", "def __is_valid_yml_file(path_to_yml_file: str) -> bool:\n\n return path_to_yml_file.endswith(\".yml\") or path_to_yml_file.endswith(\".yaml\")", "def autodetect():\n\n\tfqdn = socket.getfqdn()\n\tif not \".cheyenne\" in fqdn:\n\t\treturn False\n\n\tdirs = os.path.abspath(__file__).split('/')\n\tsweet_src_dirname = dirs[len(dirs)-5]\n\n\t# Autodetect based on source folder name for MULE source\n\t# This helps to utilize different versions of MULE on cheyenne\n\tif 
sweet_src_dirname==\"sweet_gnu\":\n\t\treturn True\n\n\treturn False", "def assert_train_augmented(self) -> bool:\n dalet = Path(os.environ[\"DATA_PATH\"]) / \"characters\" / \"train\" / \"Dalet\"\n truth_value = False\n try:\n if len(list(dalet.iterdir())) != 72: # downloaded number of chars\n truth_value = True\n except FileNotFoundError:\n pass # this is ok because we handle the truth_value\n return truth_value", "def can_ingest(cls, path: str) -> bool:\r\n \"\"\"Checks if the file can be ingested\r\n Returns:\r\n Bool: True if the file can be ingested else False\r\n \"\"\"\r\n return Path(path).suffix in [\".csv\", \".txt\", \".pdf\", \".docx\"]", "def load_rule_files ( self, files_or_dirs, ignore_missing=False ):\n if ignore_missing:\n try:\n ret = self._resolver.get_reader().read ( files_or_dirs )\n except IOError as ioerr:\n if ioerr.errno == errno.ENOENT:\n ret = False\n else:\n raise\n else:\n ret = self._resolver.get_reader().read ( files_or_dirs )\n\n self.fixup_pool_id()\n return True if ret is None else ret", "def _check_folder(self, folder):\n if os.path.exists(folder):\n def ismatch(name):\n # check for libpythonXX or pythonXX... and shared library extension\n sl_name = (self._libname, 'lib' + self._libname)\n sl_ext = ('.dll', '.so', '.dylib')\n return name.startswith(sl_name) and name.endswith(sl_ext)\n names = [n for n in os.listdir(folder) if ismatch(n)]\n if len(names) > 0:\n return os.path.join(folder, names[0])\n return None", "def check(filepath: str) -> bool:\n\n logging.debug(filepath, extra=dict(status=\"checking\"))\n\n try:\n with open(filepath) as f:\n toml.load(f)\n except toml.TomlDecodeError as err:\n logging.error(filepath, extra=dict(status=err.msg))\n return False\n\n logging.info(filepath, extra=dict(status=\"ok\"))\n return True", "def load_ontology(\n scenarios_dir: pathlib.Path,\n) -> Tuple[Optional[rasaeco.model.Ontology], List[str]]:\n errors = [] # type: List[str]\n\n path_map = dict() # type: MutableMapping[str, pathlib.Path]\n meta_map = dict() # type: MutableMapping[str, rasaeco.meta.Meta]\n\n scenario_pths = sorted(scenarios_dir.glob(\"**/scenario.md\"))\n\n for pth in scenario_pths:\n xml_pth = as_xml_path(scenario_path=pth)\n if not xml_pth.exists():\n errors.append(\n f\"The intermediate XML representation for the scenario {pth} \"\n f\"does not exist: {xml_pth}; \"\n f\"did you render the scenarios to intermediate XML representation \"\n f\"already?\"\n )\n\n if errors:\n return None, errors\n\n for pth in scenario_pths:\n meta, meta_errors = rasaeco.meta.extract_meta(\n text=pth.read_text(encoding=\"utf-8\")\n )\n\n for error in meta_errors:\n errors.append(f\"In file {pth}: {error}\")\n\n if meta_errors:\n continue\n\n assert meta is not None\n\n for i, cubelet in enumerate(meta[\"volumetric\"]):\n ##\n # Verify aspect range\n ##\n\n range_error = rasaeco.model.verify_aspect_range(\n first=cubelet[\"aspect_from\"], last=cubelet[\"aspect_to\"]\n )\n\n if range_error:\n errors.append(\n f\"In file {pth} and cubelet {i + 1}: \"\n f\"Invalid aspect range: {range_error}\"\n )\n\n range_error = rasaeco.model.verify_phase_range(\n first=cubelet[\"phase_from\"], last=cubelet[\"phase_to\"]\n )\n\n if range_error:\n errors.append(\n f\"In file {pth} and cubelet {i + 1}: \"\n f\"Invalid phase range: {range_error}\"\n )\n\n range_error = rasaeco.model.verify_level_range(\n first=cubelet[\"level_from\"], last=cubelet[\"level_to\"]\n )\n\n if range_error:\n errors.append(\n f\"In file {pth} and cubelet {i + 1}: \"\n f\"Invalid level range: 
{range_error}\"\n )\n\n identifier = pth.parent.relative_to(scenarios_dir).as_posix()\n\n meta_map[identifier] = meta\n path_map[identifier] = pth\n\n scenario_id_set = set(meta_map.keys())\n\n for identifier, meta in meta_map.items():\n for relate_to in meta[\"relations\"]:\n if relate_to[\"target\"] not in scenario_id_set:\n errors.append(\n f\"In file {path_map[identifier]}: \"\n f\"The relation {relate_to['nature']!r} is invalid \"\n f\"as the identifier of the target scenario can not be found: \"\n f\"{relate_to['target']!r}\"\n )\n\n if errors:\n return None, errors\n\n scenarios = [] # type: List[rasaeco.model.Scenario]\n for identifier, meta in meta_map.items():\n volumetric = [] # type: List[rasaeco.model.Cubelet]\n for cubelet in meta[\"volumetric\"]:\n volumetric.append(\n rasaeco.model.Cubelet(\n aspect_range=rasaeco.model.AspectRange(\n first=cubelet[\"aspect_from\"], last=cubelet[\"aspect_to\"]\n ),\n phase_range=rasaeco.model.PhaseRange(\n first=cubelet[\"phase_from\"], last=cubelet[\"phase_to\"]\n ),\n level_range=rasaeco.model.LevelRange(\n first=cubelet[\"level_from\"], last=cubelet[\"level_to\"]\n ),\n )\n )\n\n pth = path_map[identifier]\n definitions, extraction_errors = _extract_definitions(xml_path=as_xml_path(pth))\n if extraction_errors:\n errors.extend(extraction_errors)\n else:\n assert definitions is not None\n\n scenario = rasaeco.model.Scenario(\n identifier=identifier,\n title=meta[\"title\"],\n contact=meta[\"contact\"],\n volumetric=volumetric,\n definitions=definitions,\n relative_path=pth.relative_to(scenarios_dir),\n )\n\n scenarios.append(scenario)\n\n relations = [] # type: List[rasaeco.model.Relation]\n for identifier, meta in meta_map.items():\n for relation in meta[\"relations\"]:\n relations.append(\n rasaeco.model.Relation(\n source=identifier,\n target=relation[\"target\"],\n nature=relation[\"nature\"],\n )\n )\n\n ontology = rasaeco.model.Ontology(scenarios=scenarios, relations=relations)\n\n for scenario in ontology.scenarios:\n pth = scenarios_dir / scenario.relative_path\n validation_errors = _validate_references(\n scenario=scenario, ontology=ontology, xml_path=as_xml_path(pth)\n )\n\n for error in validation_errors:\n errors.append(f\"When validating references in {pth}: {error}\")\n\n if errors:\n return None, errors\n\n return ontology, []", "def check_cleaned(path):\n bool_1 = isdir(join(path, 'Main'))\n bool_2 = isdir(join(path, 'Finantial'))\n bool_3 = bool_1 and bool_2\n return bool_3", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))", "def valid_resfile(listname):\r\n global results_file, directory_res\r\n try:\r\n results_file = open(directory_res+listname+\".output\", \"r\")\r\n return True\r\n except:\r\n return False", "def canLoad(self):\n #xxx should be corrected to work on all subclass hierarchy\n return 'load' in self.__class__.__dict__", "def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)", "def sanity_check_step(self):\n custom_paths = {\n 'files':[\"%s/%s\" % (self.bindir, x) for x in [\"convert\", \"cplex\", \"cplexamp\"]],\n 'dirs':[],\n }\n super(EB_CPLEX, self).sanity_check_step(custom_paths=custom_paths)", "def check_path(filename):\n return not bool(checkPath(filename))", "def ejscreen_areas_of_concern_data_exists(cls):\n return cls.EJSCREEN_AREAS_OF_CONCERN_SOURCE.is_file()", "def check_decoders(self):\n\n for d in self.get_decoder_paths():\n 
full_path = d + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return False\n\n return True", "def load(self,filename=None): # return True\r\n pass", "def exists():\n check50.include(\"data\")\n check50.exists(\"adventure.py\")\n check50.exists(\"room.py\")", "def is_dir(self, path):", "def do_check(path):\n found_source_content = False\n iml_file = os.path.join(path, 'project.iml')\n self.assertTrue(os.path.exists(iml_file))\n dom = minidom.parse(iml_file)\n expected_paths = [\"file://\" + os.path.join(get_buildroot(), _path) for _path in [\n 'examples/src/java/org/pantsbuild/example/hello',\n 'examples/src/java/org/pantsbuild/example/hello/greet',\n 'examples/src/java/org/pantsbuild/example/hello/main',\n 'examples/src/java/org/pantsbuild/example/hello/simple',\n 'examples/src/resources/org/pantsbuild/example/hello',\n ]]\n expected_java_resource = [\"file://\" + os.path.join(get_buildroot(), _path) for _path in [\n 'examples/src/resources/org/pantsbuild/example/hello',\n ]]\n remaining = set(expected_paths)\n for sourceFolder in self._get_sourceFolders(dom):\n found_source_content = True\n self.assertEquals(\"False\", sourceFolder.getAttribute('isTestSource'))\n url = sourceFolder.getAttribute('url')\n # Check is resource attribute is set correctly\n if url in expected_java_resource:\n self.assertEquals(sourceFolder.getAttribute('type'), IdeaIntegrationTest.RESOURCE,\n msg=\"Type {c_type} does not match expected type {a_type} \"\n \"for {url}\".format(c_type=IdeaIntegrationTest.RESOURCE, url=url,\n a_type=sourceFolder.getAttribute('type')))\n self.assertIn(url, remaining,\n msg=\"Couldn't find url={url} in {expected}\".format(url=url,\n expected=expected_paths))\n remaining.remove(url)\n self.assertTrue(found_source_content)", "def can_load():\r\n # delegate to generic descriptor check to check start dates\r\n return _has_access_descriptor(user, 'load', course, course.id)", "def load(self, parser, path):\n\n if os.path.isdir(path):\n print(\"loading {} with {}\".format(path, parser))\n could_not_parse = self.load_directory(parser, path)\n else:\n could_not_parse = self.load_discourse(parser, path)\n return could_not_parse", "def _is_file_valid(name: str) -> bool:\n return not name.startswith(\".\")", "def _is_folder_exists() -> bool:\n\n pwd: str = os.getcwd()\n data_folder: str = os.path.join(pwd, \"data\")\n return os.path.isdir(data_folder)", "def _check_scene_open(self):\n return self._engine.current_file_path() is not None", "def is_known_media(extension: str) -> bool:\n return get_analyze_tool(extension) is not None", "def _is_fluxcd_app_compliant(path):\n mandatory_components = (\"base\", constants.APP_ROOT_KUSTOMIZE_FILE)\n check_mandatory = all(comp in os.listdir(path)\n for comp in mandatory_components)\n return check_mandatory", "def load(self, path: str) -> bool:\n path = replace_standard_paths(path)\n if os.path.isfile(path):\n return self.load_state(torch.load(path))\n return False", "def is_astro_dir(path):\n # is this required?\n p0 = glob.glob(os.path.join(path, '..', '*.avi'))\n\n p1 = glob.glob(os.path.join(path, \"*.bas.h5\"))\n p2 = glob.glob(os.path.join(path, \"..\", \"*.metadata.xml\"))\n return all(len(x) != 0 for x in [p1, p2, p0])", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)", "def is_valid_rule_names(self):\n # Gets all the files in the modeling rule folder\n files_to_check = get_files_in_dir(\n os.path.dirname(self.file_path), 
[\"json\", \"xif\", \"yml\"], False\n )\n integrations_folder = os.path.basename(os.path.dirname(self.file_path))\n invalid_files = []\n\n for file_path in files_to_check:\n file_name = os.path.basename(file_path)\n file_name_std = file_name.casefold()\n # The schema has _schema.json suffix and the testdata file has _testdata.json suffix\n # whereas the other content entity component files only has the .suffix\n splitter = (\n \"_\"\n if (\n file_name_std.endswith(\"_schema.json\")\n or file_name_std.endswith(\"_testdata.json\")\n )\n else \".\"\n )\n base_name = file_name.rsplit(splitter, 1)[0]\n\n if integrations_folder != base_name:\n invalid_files.append(file_name)\n\n if invalid_files:\n error_message, error_code = Errors.invalid_rule_name(invalid_files)\n if self.handle_error(error_message, error_code, file_path=self.file_path):\n self._is_valid = False\n return False\n\n return True", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'relu5-3/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'relu5-3/test.pkl')))", "def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")", "def _validate_path(self, data_dir):\n if (os.path.exists(data_dir) \n and os.path.isdir(data_dir)\n and os.listdir(data_dir)):\n\n self.tr_img_dir = data_dir + self.TRAIN_IMAGES\n self.tr_lbl_dir = data_dir + self.TRAIN_LABELS\n self.te_img_dir = data_dir + self.TEST_IMAGES\n self.te_lbl_dir = data_dir + self.TEST_LABELS\n\n if (self._check_images_and_labels(self.tr_img_dir, self.tr_lbl_dir) \n and self._check_images_and_labels(self.te_img_dir, self.te_lbl_dir)):\n \n return True\n \n return False", "def check_for_yaml_folder(check_path):\n check_abspath = os.path.abspath(check_path)\n yaml_folders = [\"_YAML\", \"YAML\"]\n for yf in yaml_folders:\n if yf in check_abspath:\n print(\"{} folder exists : {}\".format(yf, check_abspath))\n top_path, base_path = check_abspath.split(\"{}/\".format(yf))\n out_path = os.path.dirname(os.path.join(top_path, base_path))\n if os.path.exists(out_path):\n print(\"Path exists : {}\".format(out_path))\n return out_path\n else:\n print(\"Path does not exist : {}\".format(out_path))\n print(\"Please create this folder and try again\")\n exit(1)", "def load(board: Board) -> bool:\r\n\r\n file_name = filedialog.askopenfilename(\r\n initialdir = os.getcwd(), title = 'Select file', \r\n filetypes = (('Text files','*.txt'),('All files','*.*'))\r\n )\r\n\r\n if not file_name:\r\n return False\r\n\r\n try:\r\n f = open(file_name,'r')\r\n contents = f.read()\r\n f.close()\r\n except OSError:\r\n messagebox.showinfo(message = 'Could not read the file ' + file_name + '.')\r\n return False\r\n \r\n if not board.read_from_string(contents):\r\n messagebox.showinfo(message = 'You have chosen wrong or a damaged file.')\r\n return False\r\n\r\n return True", "def check_path(self, path):\n if path in self.app_path:\n return True\n else:\n return False", "def checkName(name):\n currentpath = os.path.dirname(os.path.realpath(__file__))\n fullpath = os.path.join(currentpath, name)\n return os.path.isfile(fullpath)", "def load_corpus_abstracts():\r\n\t\r\n\tglobal abstracts_dict\r\n\tif os.path.exists(paths.path_data_abstracts_pickle):\r\n\t\tprint('\\nloading abstracts')\r\n\t\tabstracts_dict = pickle.load(open(paths.path_data_abstracts_pickle,\"rb\"))\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "def test_file(self):\n a = False\n if \"show()\" in open('attempt.py').read():\n a = True\n 
self.assertEquals(a,True)", "def check_files(self, data_path):\n files = os.listdir(data_path)\n\n if 'test_batch' not in files:\n return False\n\n if 'batches.meta' not in files:\n return False\n\n for i in range(1, 6):\n if 'data_batch_{}'.format(i) not in files:\n return False\n\n return True", "def check(module: str, force: bool = False) -> bool:\n lemmatizer = get_model(module)\n return False not in [\n os.path.exists(\n get_path(module, file.name)\n )\n for file in lemmatizer.DOWNLOADS\n ] or force", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"write_qiime_train_db.py\", get_files)", "def validate_resources(self, folder, resources):\r\n self.validate_files_exist(folder, resources)\r\n self.validate_no_duplicate_paths(resources)", "def check_model_exists(class_name):\n if path.exists(settings.get('FALAFEL_DIR') + settings.get('MODELS_DIR') + '/' + class_name + '.py'):\n return True\n else:\n return False", "def ensure_completely_loaded():\n\n global COMPLETELY_LOADED\n if COMPLETELY_LOADED:\n return True\n\n from django.core.management.validation import get_validation_errors\n from StringIO import StringIO\n get_validation_errors(StringIO(), None)\n\n COMPLETELY_LOADED = True\n return True", "def datafolderexist(name):\n folderpath = os.path.join(pathtofolder(), name)\n return os.path.exists(folderpath)", "def plugin_valid(self, filepath):\n plugin_valid = False\n for extension in self.extensions:\n if filepath.endswith(\".{}\".format(extension)):\n plugin_valid = True\n break\n return plugin_valid", "def is_loaded(self):\n return os.path.exists(IPMIService.IPMI_DEV)", "def _is_prebuilt(self, cfg, patch_idx, prefix=\"PREFIX\"):\n ext = None\n dir = None\n\n if (cfg.load_models_dir is None):\n return False\n\n # Get the unique lookup file path\n fpath = self._get_unique_lookup_filepath(patch_idx, cfg.load_models_dir, prefix, NNModel._M_FILE_EXT)\n\n # Check the existence of the file\n if not os.path.isfile(fpath):\n raise Exception('Model file does not exist: {0}'.format(fpath))\n\n return True", "def valid(self):\r\n if self.dir_exists and self.files_exist:\r\n return True\r\n else:\r\n return False", "def isLoaded():\n return _loaded is not None", "def is_valid_file(self, file_path):\n return True", "def _looks_like_resource_file(self, name):\n # inefficient since we end up reading the file twice,\n # but it's fast enough for our purposes, and prevents\n # us from doing a full parse of files that are obviously\n # not robot files\n\n if (re.search(r'__init__.(txt|robot|html|tsv)$', name)):\n # These are initialize files, not resource files\n return False\n\n found_keyword_table = False\n if (name.lower().endswith(\".robot\") or\n name.lower().endswith(\".txt\") or\n name.lower().endswith(\".tsv\") or\n name.lower().endswith(\".resource\")):\n\n with open(name, \"r\") as f:\n data = f.read()\n for match in re.finditer(r'^\\*+\\s*(Test Cases?|(?:User )?Keywords?)',\n data, re.MULTILINE|re.IGNORECASE):\n if (re.match(r'Test Cases?', match.group(1), re.IGNORECASE)):\n # if there's a test case table, it's not a keyword file\n return False\n\n if (not found_keyword_table and\n re.match(r'(User )?Keywords?', match.group(1), re.IGNORECASE)):\n found_keyword_table = True\n return found_keyword_table", "def check_file_exist(self):\n return False", "def check_helpers(self):\n paths = self.get_helper_out_paths()\n\n for p in paths:\n full_path = p + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return 
False\n\n return True", "def check_model_file_exists(filepath):\n return os.path.exists(filepath)", "def supportsFile(self, filename):\n extension = filename.rsplit(\".\", 1)[1]\n return extension in AcronymDisambiguator.supported_extensions", "def can_load(self):\n\n try:\n return self._get_nearest_entry_with_artifact() is not None\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)", "def is_valid_directory(parser, arg):", "def validate(cls, config_location):\n if not os.path.isdir(config_location):\n return False\n config_path = os.path.join(config_location, cls.CONFIG_FILENAME)\n if not os.path.isfile(config_path):\n return False\n cache_dir = os.path.join(config_location, cls.CACHE_DIRNAME)\n if not os.path.isdir(cache_dir):\n return False\n if not CacheManager.validate(cache_dir):\n return False\n data_path = os.path.join(config_location, cls.DATA_DIRNAME)\n if not os.path.isdir(cache_dir):\n return False\n if not DataManager.validate(data_path):\n return False", "def is_loaded(self):\n return self.known_stations != {}", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()", "def isfile (self, path):\r\n pass", "def sanity_check(hdf):\n required_paths = ['Analyses', 'UniqueGlobalKey', 'Analyses/EventDetection_000']\n try:\n for p in required_paths:\n if p not in hdf:\n return False\n return True\n except:\n return False" ]
[ "0.64446145", "0.61130095", "0.60769767", "0.6070284", "0.6070284", "0.5984742", "0.57872206", "0.5693269", "0.566746", "0.5642804", "0.56116366", "0.5606089", "0.56005824", "0.5568455", "0.5547063", "0.55245954", "0.5513016", "0.54848117", "0.5477921", "0.5474287", "0.54519325", "0.54477096", "0.5439994", "0.54385996", "0.543717", "0.54022706", "0.54004055", "0.5396557", "0.53954196", "0.53921455", "0.53869975", "0.53833145", "0.53825486", "0.53819513", "0.53776634", "0.53704953", "0.5366984", "0.5366291", "0.5361576", "0.5358854", "0.5343275", "0.5335612", "0.5326848", "0.5324742", "0.5324427", "0.5307718", "0.5294084", "0.5292153", "0.5288751", "0.5286176", "0.5278278", "0.5277349", "0.5272351", "0.5263076", "0.5256456", "0.52473295", "0.52444375", "0.5230458", "0.5228953", "0.5228613", "0.52284527", "0.522766", "0.5213412", "0.51984966", "0.51934206", "0.518447", "0.5175018", "0.5171365", "0.5164363", "0.5161386", "0.5159242", "0.51576054", "0.51453954", "0.5142771", "0.5132575", "0.513196", "0.51299036", "0.512372", "0.51157093", "0.51108515", "0.5109585", "0.5109206", "0.51080334", "0.51067924", "0.51065373", "0.5101865", "0.51009804", "0.50976396", "0.5094532", "0.50888103", "0.50876814", "0.5086467", "0.50792", "0.50791824", "0.50751823", "0.5070481", "0.5067432", "0.5060591", "0.5060339", "0.5050663" ]
0.62103647
1
Validate X whenever one tries to predict, apply, predict_proba
def _validate_X_predict(self, X):
    # X = check_array(X, ensure_2d=False)
    X = np.atleast_2d(X)
    n_features = X.shape[1]
    if self.n_features_in_ != n_features:
        raise ValueError(
            f"Number of features of the model must match the input. Model n_features_in_ is {self.n_features_in_} and input n_features is {n_features}. Reshape your data."
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_features_in_predict_input(self, result):\n pass", "def predict_proba(self, X):\n raise NotImplementedError()", "def predict_proba(self, X, **kwargs):\n raise NotImplementedError", "def predict_proba(self, X):\n X = _validate_X(X)\n return self.best_estimator_.predict_proba(X)", "def predict(self, X):\n fmodel = self.estimators_[np.array(self.estimator_errors_).argmin()]\n predictions = fmodel.predict(X)\n return predictions", "def predict_proba(self):\n ...", "def predict_proba(self, X):\n if self.metric in TSLEARN_VALID_METRICS:\n check_is_fitted(self, '_ts_fit')\n X = check_dims(X, X_fit_dims=self._ts_fit.shape, extend=True,\n check_n_features_only=True)\n X_ = self._precompute_cross_dist(X)\n pred = super().predict_proba(X_)\n self.metric = self._ts_metric\n return pred\n else:\n check_is_fitted(self, '_X_fit')\n X = check_array(X, allow_nd=True)\n X = to_time_series_dataset(X)\n X_ = to_sklearn_dataset(X)\n X_ = check_dims(X_, X_fit_dims=self._X_fit.shape, extend=False)\n return super().predict_proba(X_)", "def _predict(self, testX):\n pass", "def _predict_scores_fixed(self, X, **kwargs):\n return self.predict_proba(X, **kwargs)", "def predict_proba(self, X):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.predict_proba(stuff)\n return result\n pass", "def predict_evidences(self, X):", "def predict(self, X):\n raise NotImplemetedError()", "def predict_proba(self, X):\n check_is_fitted(self, ['estimators_', 'final_estimator_'])\n return self.final_estimator_.predict_proba(self.transform(X))", "def predict_proba(self, X):\n return super(SGDLogistic, self).predict(X)", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n N, D = X.shape\n\n # init prediction array\n prediction = np.array([-1] * N)\n\n # retrieve the probability of predicting fraud for each model (K models)\n predict_proba_fraud = [-1] * self.K\n\n # we do the computation for all input test examples\n for i, instance in enumerate(X):\n sum_weight = 0\n F_k = 0\n\n # for k in= {1,2.....K} do\n k = -1\n for model in self.models.islice(start=0, stop=self.K, reverse=True):\n k += 1\n clf = model.clf\n sum_weight += model.weight\n\n # (1) compute the corresponding Fk(x)\n # compute one part of Fk(y) with the weights (be careful: sum_weight may be 0)\n F_k = (F_k * sum_weight) / sum_weight if sum_weight != 0 else 0\n\n # if the probability is not initialized we call the predict proba method\n if (type(predict_proba_fraud[k]) is int and predict_proba_fraud[k] == -1) \\\n or (predict_proba_fraud[k].shape[0] != self.S):\n predict_proba_fraud[k] = clf.predict_proba(self.X_chunk)\n\n # if we don't have the probability of predicting fraud --> p = 0, do nothing\n if len(predict_proba_fraud[k][i]) == 2:\n F_k += (model.weight * predict_proba_fraud[k][i][1]) / sum_weight\n\n # (2) we assign Fk value to a bin j\n t_y = instance[-1] # amount of the transaction (in the last column of the features)\n found = False # found: if a label has been decided (deal with 2 for's break)\n j = 0\n eps = len(self.bins)\n\n # while we haven't found the bin AND no prediction has not yet been given\n while j < eps and not found:\n stat = self.bins[j][k]\n\n # find the bin i y belongs to\n if (j / eps) <= F_k < ((j + 1) / eps):\n # (3) apply rule (10) for this bin (What if the amount is 0 ?)\n if t_y != 0:\n if F_k - stat['mean'] - self.t * stat['var'] > (self.cost / t_y): # FRAUD\n found = True\n prediction[i] = 1\n elif F_k + stat['mean'] + self.t * 
stat['var'] <= (self.cost / t_y): # NON-FRAUD\n found = True\n prediction[i] = 0\n else:\n found = True\n prediction[i] = 0\n\n j = j + 1\n\n if found: # if we found a value we go to the next example\n break\n\n # (4) if no classifier left i.e. we have consulted every classifier without having an answer\n # --> prediction[i] is not yet given\n if prediction[i] == -1:\n if instance[-1] != 0 and F_k > self.cost / instance[-1]: # instance[-1] is just t(y)\n prediction[i] = 1\n else:\n prediction[i] = 0\n\n return prediction", "def test_predict_proba_nonnegative():\n\n def check_for_negative_prob(proba):\n for p in np.ravel(proba):\n assert np.round(p,7) >= 0\n\n clf = mord.LogisticAT(alpha=0.)\n clf.fit(X, y)\n check_for_negative_prob(clf.predict_proba(X))\n\n clf2 = mord.LogisticIT(alpha=0.)\n clf2.fit(X, y)\n check_for_negative_prob(clf2.predict_proba(X))\n\n clf3 = mord.LogisticSE(alpha=0.)\n clf3.fit(X, y)\n check_for_negative_prob(clf3.predict_proba(X))", "def predict(self, X):", "def predict(self, X):", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def predict_proba(self, X):\n return self.model.predict_proba(X)", "def predict_proba(self, X):\n if self._label_binarier is True:\n label_binarier=LabelBinarizer()\n label_binarier.fit(np.arange(500000))\n X_id=label_binarier.transform(X['model_serial'])\n X.drop(['model_serial'],axis=1,inplace=True)\n X=np.concatenate((X.to_numpy(),X_id),axis=1)\n y_pred = np.array(\n [model.predict_proba(X) for model in self.estimators_]\n ).mean(axis=0)\n if y_pred.ndim == 1:\n y_pred = y_pred[:, np.newaxis]\n if y_pred.shape[1] == 1:\n y_pred = np.append(1 - y_pred, y_pred, axis=1)\n print(y_pred.shape)\n return y_pred", "def predict_proba(self, X):\n self.check_is_fitted()\n X = self._check_clusterer_input(X)\n return self._predict_proba(X)", "def predict_proba(self, X):\n return self.LR.predict_proba(self.transform(X))", "def predict_proba(self, X: np.ndarray) -> np.ndarray:\n return self._rf.predict_proba(X)", "def check_predict_proba_one_binary(classifier, dataset):\n\n for x, y in dataset:\n y_pred = classifier.predict_proba_one(x)\n classifier = classifier.learn_one(x, y)\n assert set(y_pred.keys()) == {False, True}", "def check_predict_proba_one(classifier, dataset):\n\n from river import utils\n\n if not hasattr(classifier, \"predict_proba_one\"):\n return\n\n for x, y in dataset:\n xx, yy = copy.deepcopy(x), copy.deepcopy(y)\n\n classifier = classifier.learn_one(x, y)\n y_pred = classifier.predict_proba_one(x)\n\n if utils.inspect.isactivelearner(classifier):\n y_pred, _ = y_pred\n\n # Check the probabilities are coherent\n assert isinstance(y_pred, dict)\n for proba in y_pred.values():\n assert 0.0 <= proba <= 1.0\n assert math.isclose(sum(y_pred.values()), 1.0)\n\n # Check predict_proba_one is pure (i.e. 
x and y haven't changed)\n assert x == xx\n assert y == yy", "def _predict(self, X):\n raise NotImplementedError", "def predict_proba(self, X_test):\n return self.model.predict_proba(X_test)", "def predict_proba(self, X):\n self._check_is_fitted('predict_proba')\n return self.best_estimator_.predict_proba(X)", "def predict(self, X):\n X = _validate_X(X)\n return self.best_estimator_.predict(X)", "def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):", "def decision_function(self, X):\n check_is_fitted(self, ['model_', 'history_'])\n X = check_array(X)\n\n if self.preprocessing:\n X_norm = self.scaler_.transform(X)\n else:\n X_norm = np.copy(X)\n\n # Predict on X and return the reconstruction errors\n pred_scores = self.model_.predict(X_norm)\n return pairwise_distances_no_broadcast(X_norm, pred_scores)", "def predict(self, X_test):\n if self.basis_func is not None:\n X_transformed = self.basis_func(X_test)\n else:\n X_transformed = X_test\n\n # Marginalise predictions over hyperparameters\n mu = np.zeros([len(self.hypers), X_transformed.shape[0]])\n var = np.zeros([len(self.hypers), X_transformed.shape[0]])\n\n for i, h in enumerate(self.hypers):\n mu[i] = np.dot(self.models[i][0].T, X_transformed.T)\n var[i] = 1. / h[1] + np.diag(np.dot(np.dot(X_transformed, self.models[i][1]), X_transformed.T))\n\n m = mu.mean(axis=0)\n v = var.mean(axis=0)\n # Clip negative variances and set them to the smallest\n # positive float value\n if v.shape[0] == 1:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n else:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n v[np.where((v < np.finfo(v.dtype).eps) & (v > -np.finfo(v.dtype).eps))] = 0\n\n return m, v", "def _validate_X_predict(self, X, check_input=True):\n if check_input:\n X = check_array(X, dtype=DTYPE, accept_sparse=\"csr\")\n if issparse(X) and (X.indices.dtype != np.intc or\n X.indptr.dtype != np.intc):\n raise ValueError(\"No support for np.int64 index based \"\n \"sparse matrices\")\n\n n_features = X.shape[1]\n # if self.n_features_ != n_features:\n # raise ValueError(\"Number of features of the model must \"\n # \"match the input. 
Model n_features is %s and \"\n # \"input n_features is %s \"\n # % (self.n_features_, n_features))\n\n return X", "def predict_only(self):", "def validation():\n global SEARCH_REGION\n predictor = None\n response = []\n count = 0\n for chromosome in train_set:\n if chromosome not in configs.chromosome_list:\n continue\n for inf in train_set[chromosome]:\n strand = inf[3]\n if strand == 1:\n base = inf[0] - SEARCH_REGION\n final = inf[0] \n else:\n base = inf[1]\n final = inf[1] + SEARCH_REGION\n value = inf[2]\n if base < 0:\n continue\n result = fectch_predictor_avg(chromosome, base, final)\n if result is None:\n continue\n response.append(value)\n if predictor is None:\n predictor = result\n else:\n predictor = np.vstack((predictor, result))\n count += 1\n print(\"in train:\", predictor.shape)\n response = np.asarray(response).T\n regr = linear_model.LinearRegression()\n regr.fit(predictor, response)\n \n pre_response = regr.predict(predictor)\n adj_r2 = util.adj_r2_score(response, pre_response,count,state_n)\n r2 = sklearn.metrics.r2_score(response, pre_response)\n configs.toLog(\"train r2:{}\".format(r2))\n configs.toLog(\"train adjr2:{}\".format(adj_r2))\n\n predictor = None\n response = [] \n count = 0\n for chromosome in test_set:\n if chromosome not in configs.chromosome_list:\n continue\n for inf in test_set[chromosome]:\n strand = inf[3]\n if strand == 1:\n base = inf[0] - SEARCH_REGION\n final = inf[0] \n else:\n base = inf[1]\n final = inf[1] + SEARCH_REGION\n value = inf[2]\n if base < 0:\n continue\n result = fectch_predictor_avg(chromosome, base, final)\n if result is None:\n continue\n response.append(value)\n if predictor is None:\n predictor = result\n else:\n predictor = np.vstack((predictor, result))\n count += 1\n print(\"in test:\", predictor.shape)\n pre_response = regr.predict(predictor)\n adj_r2 = util.adj_r2_score(response, pre_response, count, state_n)\n r2 = sklearn.metrics.r2_score(response, pre_response)\n configs.toLog(\"test r2:{}\".format(r2))\n configs.toLog(\"test adjr2:{}\".format(adj_r2))", "def predict_probas(self, X):\n return self.model.predict(X, batch_size=self.batch_size, verbose=self.verbose)", "def predict_proba(self, X):\n raise NotImplemented(\"predict_proba function is currently disabled for\"\n \"clustering due to inconsistent behaviours.\")", "def predict(self, X):\n raise NotImplementedError", "def predict(self, X):\n\n # validation before prediction\n X = self._predict_strategy_validator(self, X)\n\n # get the alpha and betas, then create linear equation for predictions\n alpha = self.statistics_[\"coefs\"].values[0]\n betas = self.statistics_[\"coefs\"].values[1:]\n preds = alpha + betas.dot(X.T)\n return preds", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict(self, X):\n pass", "def predict_proba(self, X):\n return self._apply_predict_method(\n X, \"predict_proba\", _predict_proba_piecewise_estimator,\n len(self.mean_estimator_.classes_))", "def predict(\n self, X: Optional[ArrayLike], X_rel: List[RelationBlock] = []\n ) -> np.ndarray:\n return self.predict_proba(X, X_rel) > 0.5", "def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))", "def _predict_scores_fixed(self, X, **kwargs):\n raise NotImplementedError", "def predict(self, model, x_test):\n pass", "def validate(self, candidate, **kwargs) -> bool:\n return 
super().validate(candidate, **kwargs) and self._predictor.validate(candidate)", "def cross_validate(self, pre_train=None, **kwargs):\n \n inferred = np.nan * np.ones_like(self.labels_array)\n N_training_set, N_labels = inferred.shape\n N_stop_at = kwargs.pop(\"N\", N_training_set)\n\n debug = kwargs.pop(\"debug\", False)\n \n kwds = { \"threads\": self.threads }\n kwds.update(kwargs)\n\n for i in range(N_training_set):\n \n training_set = np.ones(N_training_set, dtype=bool)\n training_set[i] = False\n\n # Create a clean model to use so we don't overwrite self.\n model = self.__class__(\n self.training_labels[training_set],\n self.training_fluxes[training_set],\n self.training_flux_uncertainties[training_set],\n **kwds)\n\n # Initialise and run any pre-training function.\n for _attribute in self._descriptive_attributes:\n setattr(model, _attribute[1:], getattr(self, _attribute[1:]))\n\n if pre_train is not None:\n pre_train(self, model)\n\n # Train and solve.\n model.train()\n\n try:\n inferred[i, :] = model.fit(self.training_fluxes[i],\n self.training_flux_uncertainties[i], full_output=False)\n\n except:\n logger.exception(\"Exception during cross-validation on object \"\n \"with index {0}:\".format(i))\n if debug: raise\n\n if i == N_stop_at + 1:\n break\n\n return inferred[:N_stop_at, :]", "def predict_proba(self, X):\n score = self.decision_function(X)\n try:\n return self.loss_._score_to_proba(score)\n except NotFittedError:\n raise\n except AttributeError:\n raise AttributeError('loss=%r does not support predict_proba' %\n self.loss)", "def validate(self, val_X, val_Y, alpha):\n pred = self.predict(val_X, alpha)\n return (pred == val_Y).mean()", "def predict_proba(self, X_test):\n return self.classifier.predict_proba(X_test)", "def cross_val_pred_both(model, X_train, y_train, X_test, cv=5, n_class=2, problem_type='infer'):\n if problem_type == 'infer':\n problem_type = get_problem_type(y_train)\n if problem_type == 'classification':\n pred_train = np.zeros((len(y_train), n_class))\n pred_test = np.zeros((len(X_test), n_class))\n else:\n pred_train = np.zeros(len(y_train))\n pred_test = np.zeros(len(X_test))\n \n if cv > 1:\n kfold=KFold(len(X_train), n_folds=cv)\n\n if problem_type == 'classification':\n for train_index, test_index in kfold:\n model.fit(X_train.iloc[train_index], y_train.iloc[train_index])\n pred_train[test_index] = model.predict_proba(X_train.iloc[test_index])\n pred_test = pred_test + model.predict_proba(X_test)\n else:\n for train_index, test_index in kfold:\n model.fit(X_train.iloc[train_index], y_train.iloc[train_index])\n pred_train[test_index] = model.predict(X_train.iloc[test_index])\n pred_test = pred_test + model.predict(X_test) \n\n pred_test = pred_test/float(cv)\n elif cv == 1:\n if problem_type == 'classification':\n model.fit(X_train, y_train)\n pred_train = model.predict_proba(X_train)\n pred_test = model.predict_proba(X_test)\n else:\n model.fit(X_train, y_train)\n pred_train = model.predict(X_train)\n pred_test = model.predict(X_test) \n return pred_train, pred_test", "def test_predict(self, pipeline):\n pipeline.fit(X, Y)\n y_out_fit = pipeline.predict(X_TEST_1)\n assert isinstance(y_out_fit, np.ndarray)\n assert y_out_fit.ndim == 1\n pipeline.partial_fit(X, Y)\n y_out_partial_fit = pipeline.predict(X_TEST_2)\n assert isinstance(y_out_partial_fit, np.ndarray)\n assert y_out_partial_fit.ndim == 1", "def predict(self, X):\n return self.decision_function(X)", "def predict(x, clf):\n\n y_pred, y_prob = clf.predict(x), clf.predict_proba(x)\n return 
y_pred, y_prob", "def _predict_proba(self, X):\n # self._X should be the stored _X\n dist_mat = self._distance(X, self._X)\n\n y_pred = self.knn_estimator_.predict_proba(dist_mat)\n\n return y_pred", "def staged_predict_proba(self, X):\n try:\n for score in self._staged_decision_function(X):\n yield self.loss_._score_to_proba(score)\n except NotFittedError:\n raise\n except AttributeError:\n raise AttributeError('loss=%r does not support predict_proba' %\n self.loss)[", "def predict_proba(self, X):\n return self.activation(X)", "def test_fit_predict_proba_with_false_in_keep_meta_X(self) -> type(None):\n X, y = get_dataset_for_classification()\n clf = StackingClassifier(keep_meta_X=False)\n _ = clf.fit_predict_proba(X, y)\n self.assertFalse(clf.keep_meta_X)\n self.assertTrue(clf.meta_X_ is None)", "def predict(self, X):\n X_pp = self.preprocessor.transform(X)\n # Make predictions on the data here\n return(self.estimator.score(X_pp))", "def predict_proba(self, X):\n # Wrap 'predict_proba' using Lambda expression\n predict_proba = (lambda clf, x: clf.estimator.predict_proba(x) \n if clf.get_info()['predict_probas'] else np.zeros(x.shape[0]))\n return [(name, predict_proba(clf, X)) for name, clf in self.clf.items()]", "def Xval_on_single_patient(predictor_cls, feature_extractor, patient_name=\"Dog_1\",preprocess=True):\r\n # predictor_cls is a handle to an instance of PredictorBase\r\n # Instantiate the predictor \r\n predictor = predictor_cls()\r\n base_dir = Global.path_map('clips_folder')\r\n base_dir = '/nfs/data3/kaggle_seizure/clips/'\r\n loader = DataLoader(base_dir, feature_extractor)\r\n\r\n X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(patient_name,preprocess=preprocess)\r\n #X_train,y_seizure, y_early = loader.training_data(patient_name)\r\n #y_train = [y_seizure,y_early]\r\n #X_list,y_list = train_test_split(X_train,y_train)\r\n\r\n # running cross validation\r\n print(patient_name)\r\n print(\"\\ncross validation: seizures vs not\")\r\n result_seizure = XValidation.evaluate(X_list, y_seizure, predictor, evaluation=auc)\r\n print('cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \\\r\n % (np.mean(result_seizure), np.std(result_seizure), result_seizure))\r\n print(\"\\ncross validation: early_vs_not\")\r\n result_early = XValidation.evaluate(X_list, y_early, predictor, evaluation=auc)\r\n print('cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \\\r\n % (np.mean(result_early), np.std(result_early), result_early))\r\n return result_seizure,result_early", "def validate_on_batch(\n network,\n loss_fn,\n X,\n y_target\n):\n # Do the forward pass to predict the primitive_parameters\n y_hat = network(X)\n loss = loss_fn(y_hat, y_target)\n return (\n loss.item(),\n [x.data if hasattr(x, \"data\") else x for x in y_hat],\n )", "def predict(self, X):\n preds = [np.multiply(\n np.array(exp.estimator.predict(X)), exp.weight\n ) for exp in self.experts]\n\n sum_weights = np.sum(\n [exp.weight for exp in self.experts],\n axis=0\n )\n\n return (\n (np.sum(preds, axis=0) / sum_weights) >= .5\n ).astype(int)", "def predict(self, X):\n pred = np.zeros(X.shape[0])\n ### YOUR CODE HERE 1-3 lines\n probabilities = np.array([model.probability(X) for model in self.models])\n pred=np.argmax(probabilities, axis=0)\n ### END CODE\n assert pred.shape == (X.shape[0],)\n return pred", "def test_predict_func(self):\n ve = VariogramEstimator(n_lags=15, normalize=False).fit(self.c, self.v)\n v = ve.variogram\n\n x = np.linspace(0, ve.range_, 100)\n\n 
assert_array_almost_equal(ve.predict(x), v.transform(x), decimal=6)", "def fit_predict(self):\n raise AttributeError", "def test_valid_prediction(alpha: Any) -> None:\n model = LogisticRegression(multi_class=\"multinomial\")\n model.fit(X_toy, y_toy)\n mapie = MapieClassifier(estimator=model, cv=\"prefit\")\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy, alpha=alpha)", "def predict(self, X):\n raise NotImplementedError('Abstract method \"predict\" must be '\n 'specialised!')", "def predict(self, X):\n check_is_fitted(self, ['estimators_', 'final_estimator_'])\n return self.final_estimator_.predict(self.transform(X))", "def validate(inputs):\n print \"running validation\"\n my_data = genfromtxt(inputs, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #processing data without targets\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #for further information about parameters, please google sklearn docs\n parameters = {'kernel':('sigmoid', 'rbf'), 'C':[.1,.2,1.0],'cache_size':[500]}\n svr = svm.SVC()\n clf = grid_search.GridSearchCV(svr, parameters,n_jobs=3)\n sys.stdout.write(\"%s:validating... \"%(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())))\n output = clf.fit(X,Y)\n print output\n print \"(%s) DONE.\" % (strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n sys.exit(0)", "def predict_proba(self, x):\n return self.predict(x, probs=True)", "def crossValidatePredictor(X,y,clfinputs,logfile='cvout.log'):\n \n models,modelcvs,preds,probs = [],[],[],[]\n scores = dict([(key,[]) for key in list(scorefn.keys())])\n errors = dict([(key,[]) for key in list(errorfn.keys())])\n\n # validate class labels\n uy = np.unique(y)\n if len(uy) != 2:\n print('error: need 2 classes for classification!')\n return {}\n \n N,ymin = len(y),uy[0]\n\n if cv_type == 'loo':\n cv = KFold(N,n_folds=N,random_state=train_state)\n y_pred = np.zeros(N)\n y_prob = np.zeros(N)\n else: \n cv = StratifiedKFold(y,n_folds=train_folds,random_state=train_state)\n\n n_folds = len(cv) \n model_id = clfinputs['clf_type']\n widgets = ['%s cv: '%cv_type, Percentage(), ' ', Bar('='), ' ', ETA()]\n pbar = ProgressBar(widgets=widgets, maxval=n_folds+(cv_type=='loo')).start()\n with open(logfile,'w') as logfid:\n cv_test_index = []\n scorekeys = sorted(scores.keys())\n for i,(train_index,test_index) in enumerate(cv):\n pbar.update(i)\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n cv_test_index.extend(test_index) \n \n # xgb assumes labels \\in {0,1}\n if model_id == 'xgb' and ymin == -1: \n y_train[y_train==-1] = 0 \n\n # train/predict as usual\n clf,clf_cv = train(X_train,y_train,clfinputs)\n clf_pred = clf.predict(X_test)\n if model_id == 'xgb' and ymin == -1:\n clf_pred[clf_pred==0] = -1\n\n if cv_probs:\n clf_prob = clf.predict_proba(X_test)[:,0]\n else:\n clf_prob = np.ones(len(clf_pred))*np.nan\n \n # loo predicts one label per 'fold'\n if cv_type == 'loo':\n\n y_pred[test_index] = clf_pred\n y_prob[test_index] = clf_prob\n # compute scores for the points we've classified thus far\n y_test_cur = np.atleast_1d(y[cv_test_index])\n y_pred_cur = np.atleast_1d(y_pred[cv_test_index])\n \n for score,score_fn in list(scorefn.items()):\n scorei = score_fn(y_test_cur,y_pred_cur,uy)\n scores[score] = [scorei] \n else:\n # collect output for all test samples in this fold\n for score,score_fn in list(scorefn.items()):\n 
scorei = score_fn(y_test,clf_pred,uy)\n scores[score].append(scorei) \n preds.append(clf_pred)\n probs.append(clf_prob)\n models.append(clf)\n modelcvs.append(clf_cv)\n for error,error_fn in list(errorfn.items()):\n errors[error].append(error_fn(y_test,clf_pred))\n\n if i==0:\n scorenames = ['%-16s'%score for score in scorekeys]\n logstr = '%-8s %s'%('i',''.join(scorenames))\n else:\n curscores = ['%-16.4f'%(np.mean(scores[score]))\n for score in scorekeys] \n logstr = '%-8.3g %s'%(i,''.join(curscores))\n print(logstr,file=logfid,flush=True)\n\n # train full model for loo cv, score on loo preds from above\n if cv_type == 'loo':\n for score,score_fn in list(scorefn.items()): \n scores[score] = [score_fn(y,y_pred,uy)]\n for error,error_fn in list(errorfn.items()):\n errors[error] = [error_fn(y,y_pred)]\n\n clf,clf_cv = train(X,y,clfinputs)\n models = [clf]\n modelcvs = [clf_cv]\n preds = [y_pred]\n probs = [y_prob]\n pbar.update(i+1)\n pbar.finish() \n\n # output scores ordered by key\n for score_id in scorekeys:\n score_vals = scores[score_id]\n print('mean %s: %7.4f (std=%7.4f)'%(score_id, np.mean(score_vals),\n np.std(score_vals)))\n\n return {'preds':preds,'probs':probs,'scores':scores,'errors':errors,\n 'models':models,'modelcvs':modelcvs}", "def validate(self):\n\n # start validate\n self.model.eval()\n preds, labels = [], []\n for batch_idx, data in enumerate(self.valid_dataloader):\n # calculate and log losses\n losses_report, valid_preds, valid_labels = self.forward_one_batch(\n data)\n self._update_losses(losses_report, train=False)\n\n preds.append(valid_preds)\n labels.append(valid_labels)\n\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n # calculate and log metrics\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=False)\n\n # TODO: lr scheduler step setting\n self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg)\n\n # end validate\n self.model.train()", "def _predict(self, x):\n pass", "def test_using_predict(self):\n [X, labels, Y] = self.gen_data()\n # Call algorithm\n bias = multiLogReg(self.sds.from_numpy(\n X), self.sds.from_numpy(Y), verbose=False).compute()\n\n [m, y_pred, acc] = multiLogRegPredict(self.sds.from_numpy(\n X), self.sds.from_numpy(bias), self.sds.from_numpy(Y), verbose=False).compute()\n\n self.assertTrue(acc > 98)", "def cross_validate(estimator: BaseEstimator, X: pd.DataFrame, y: pd.DataFrame, num_splits: int, save_name: str) -> None:\r\n splitter = StratifiedKFold(n_splits=num_splits, shuffle=True, random_state=0)\r\n\r\n predictions = {\"test\": [], \"train\": []}\r\n y_true = {\"test\": [], \"train\": []}\r\n\r\n for train_index, test_index in splitter.split(X, y):\r\n estimator.fit(X.iloc[train_index, :], y.iloc[train_index, 0])\r\n test_pred = estimator.predict(X.iloc[test_index, :])\r\n train_pred = estimator.predict(X.iloc[train_index, :])\r\n\r\n predictions[\"train\"].append(train_pred)\r\n predictions[\"test\"].append(test_pred)\r\n\r\n y_true[\"train\"].append(np.array(y.iloc[train_index])[:, 0])\r\n y_true[\"test\"].append(np.array(y.iloc[test_index])[:, 0])\r\n\r\n error_profile(y_true, predictions, model_type=save_name)", "def predict_proba(self, X):\n X = check_array(X, dtype=np.float64)\n prob = []\n for i, x in enumerate(X):\n prob.append(self._max_like_est(x))\n return np.array(prob)", "def predict_proba(self, x):\n scores = self.forward(x)\n scores = F.softmax(scores, dim=1)\n probs, predictions = 
scores.max(1)\n return probs, predictions", "def predict_proba(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)\n probs = self.model.predict_proba(x, **kwargs)\n\n # check if binary classification\n if probs.shape[1] == 1:\n # first column is probability of class 0 and second is of class 1\n probs = np.hstack([1 - probs, probs])\n return probs", "def predict(self, X):\n result = None\n for idx in range(self.width):\n if self.preprocessors[idx] is not None:\n X_new = self.preprocessors[idx].transform(X)\n else:\n X_new = X\n\n if self.proba[idx]:\n if _method_checker(self.models[idx], 'predict_proba'):\n temp_result = self.models[idx].predict_proba(X_new)\n else:\n warnings.warn(\"\"\"Warning: predict_proba not exist for {},\n using predict instead\"\"\".format(\n self.models[idx].__class__))\n temp_result = self.models[idx].predict(X_new)\n temp_result = np.expand_dims(temp_result, axis=1)\n else:\n temp_result = self.models[idx].predict(X_new)\n temp_result = np.expand_dims(temp_result, axis=1)\n\n if result is None:\n result = temp_result\n else:\n result = np.concatenate((result, temp_result), axis=1)\n return result", "def fit_predict(self, x_data, y_data, custom_kfold=None, regressor=False):\n if regressor:\n self.r2_scores, self.mse_scores = self.predict_and_cv_score_regression(x_data, y_data, custom_kfold)\n else:\n self.f1_scores, self.recall_scores, self.precision_scores, self.accuracy_scores = self.predict_and_cv_score(x_data, y_data, custom_kfold)", "def predict(self, x):\n \n\n return predictions", "def predict(self, X: List[np.ndarray], **kwargs) -> List[np.ndarray]:", "def fit_predict_single_fold(\n self, train: TabularDataset, valid: TabularDataset\n ) -> Tuple[LinearEstimator, np.ndarray]:\n if type(train) is PandasDataset:\n train = train.to_numpy()\n valid = valid.to_numpy()\n\n _model, cs, l1_ratios, early_stopping = self._infer_params()\n\n train_target, train_weight = self.task.losses[\"sklearn\"].fw_func(train.target, train.weights)\n valid_target, valid_weight = self.task.losses[\"sklearn\"].fw_func(valid.target, valid.weights)\n\n model = deepcopy(_model)\n\n best_score = -np.inf\n best_pred = None\n best_model = None\n\n metric = self.task.losses[\"sklearn\"].metric_func\n\n for l1_ratio in sorted(l1_ratios, reverse=True):\n\n try:\n model.set_params(**{\"l1_ratio\": l1_ratio})\n except ValueError:\n pass\n\n model = deepcopy(_model)\n\n c_best_score = -np.inf\n c_best_pred = None\n c_best_model = None\n es = 0\n\n for n, c in enumerate(cs):\n\n try:\n model.set_params(**{\"C\": c})\n except ValueError:\n model.set_params(**{\"alpha\": c})\n\n model.fit(train.data, train_target, train_weight)\n\n if np.allclose(model.coef_, 0):\n if n == (len(cs) - 1):\n logger.info2(\n \"All model coefs are 0. 
Model with l1_ratio {0} is dummy\".format(l1_ratio),\n UserWarning,\n )\n else:\n logger.debug(\"C = {0} all model coefs are 0\".format(c))\n continue\n\n pred = self._predict_w_model_type(model, valid.data)\n score = metric(valid_target, pred, valid_weight)\n\n logger.debug(\"C = {0}, l1_ratio = {1}, score = {2}\".format(c, 1, score))\n\n # TODO: check about greater and equal\n if score >= c_best_score:\n c_best_score = score\n c_best_pred = deepcopy(pred)\n es = 0\n c_best_model = deepcopy(model)\n else:\n es += 1\n\n if es >= early_stopping:\n logger.debug(\"Early stopping..\")\n break\n\n if self.timer.time_limit_exceeded():\n logger.info3(\"Time limit exceeded\")\n break\n\n # TODO: Think about is it ok to check time inside train loop?\n if (model.coef_ != 0).all():\n logger.debug(\"All coefs are nonzero\")\n break\n\n if c_best_score >= best_score:\n best_score = c_best_score\n best_pred = deepcopy(c_best_pred)\n best_model = deepcopy(c_best_model)\n\n if self.timer.time_limit_exceeded():\n logger.info3(\"Time limit exceeded\")\n break\n\n val_pred = self.task.losses[\"sklearn\"].bw_func(best_pred)\n\n return best_model, val_pred", "def predict(self, X):\n return self.opt.eval(X)", "def predict(self, X):\n\n X = np.asarray(X)\n if X.ndim == 1:\n# X = np.asarray([X]).T\n # Data has a single feature\n X = X.reshape(-1, 1)\n\n # Map the data with RBF kernel\n Din = dist.cdist(X, self.X, 'sqeuclidean')\n X_map = np.exp(-self.gamma_in * Din)\n\n # Prediction\n pred = self.reg.predict(X_map)\n if self.location:\n pred += self.lsr.predict(X_map) * self.std_residue + \\\n self.mean_residue\n# pred += self.lsr.predict(X_map) if self.location else 0\n\n return pred", "def _modified_cross_validate(self, X, y, return_regressor=False,\n error_score=np.nan, return_incumbent_score=False):\n\n # Base boosting has already validated the data\n if not hasattr(self, '_baseboostcv'):\n X, y = _validate_data(X=X, y=y)\n\n X, y, groups = sklearn.utils.validation.indexable(X, y, None)\n\n if not hasattr(self, 'pipe'):\n n_samples = _n_samples(y)\n fold_size = np.full(shape=n_samples, fill_value=n_samples // self.cv,\n dtype=np.int)\n estimate_fold_size = n_samples - (np.max(fold_size) + 1)\n self.get_pipeline(y=y, n_quantiles=estimate_fold_size)\n\n cv = sklearn.model_selection._split.check_cv(cv=self.cv, y=y, classifier=self.pipe)\n\n scorers, _ = sklearn.metrics._scorer._check_multimetric_scoring(estimator=self.pipe,\n scoring=self.scoring)\n\n parallel = joblib.Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n pre_dispatch='2*n_jobs')\n\n scores = parallel(joblib.delayed(sklearn.model_selection._validation._fit_and_score)(\n estimator=sklearn.base.clone(self.pipe), X=X, y=y, scorer=scorers,\n train=train, test=test, verbose=self.verbose, parameters=None,\n fit_params=None, return_train_score=self.return_train_score,\n return_parameters=False, return_n_test_samples=False,\n return_times=True, return_estimator=return_regressor,\n error_score=np.nan)\n for train, test in cv.split(X, y, groups))\n\n if return_incumbent_score:\n if self.target_index is not None:\n y_pred = X.iloc[:, self.target_index]\n else:\n y_pred = X\n\n incumbent_test_score = parallel(joblib.delayed(self.score)(\n y_true=y.loc[test], y_pred=y_pred.loc[test])\n for _, test in cv.split(X, y, groups))\n\n if self.scoring == 'neg_mean_absolute_error':\n incumbent_test_score = [score['mae'].values[0] for score in incumbent_test_score]\n elif self.scoring == 'neg_mean_squared_error':\n incumbent_test_score = [score['mse'].values[0] for 
score in incumbent_test_score]\n\n zipped_scores = list(zip(*scores))\n if self.return_train_score:\n train_scores = zipped_scores.pop(0)\n train_scores = sklearn.model_selection._validation._aggregate_score_dicts(train_scores)\n if return_regressor:\n fitted_regressors = zipped_scores.pop()\n test_scores, fit_times, score_times = zipped_scores\n test_scores = sklearn.model_selection._validation._aggregate_score_dicts(test_scores)\n\n ret = {}\n ret['fit_time'] = np.array(fit_times)\n ret['score_time'] = np.array(score_times)\n\n if return_regressor:\n ret['regressor'] = fitted_regressors\n\n for name in scorers:\n ret['test_%s' % name] = np.array(test_scores[name])\n if self.return_train_score:\n key = 'train_%s' % name\n ret[key] = np.array(train_scores[name])\n\n if return_incumbent_score:\n ret['incumbent_test_score'] = incumbent_test_score\n\n return ret", "def fit_and_predict(self, X_train, y_train, X_test, y_test):\n if self.feature_transform_func:\n X_train, X_test = self.feature_transform_func(X_train, X_test)\n\n self.fit(X_train, y_train)\n y_predict = self.predict(X_test)\n return self.Acu_eval(y_predict, y_test)", "def predict(self, X):\r\n return 1 if self.predict_prob(X) > 0.5 else 0", "def fit_predict_single_fold(\n self, train: TabularDataset, valid: TabularDataset\n ) -> Tuple[TorchBasedLinearEstimator, np.ndarray]:\n if type(train) is PandasDataset:\n train = train.to_numpy()\n valid = valid.to_numpy()\n\n model = self._infer_params()\n\n model.fit(\n train.data,\n train.target,\n train.weights,\n valid.data,\n valid.target,\n valid.weights,\n )\n\n val_pred = model.predict(valid.data)\n\n return model, val_pred", "def predict_proba(self, X, pred_batch_size=None):\n outputs = self.decision_function(X, pred_batch_size)\n probs_positive = sigmoid(outputs)\n probs_negative = 1 - probs_positive\n probs = np.vstack((probs_negative.T, probs_positive.T))\n return probs.T", "def predict_proba(self, X):\n\n X = check_array(X)\n proba = np.zeros((X.shape[0], self.n_classes_))\n for i in xrange(self.n_estimators):\n proba += self.coef_[i] * self._tree_predict_proba(i, X)\n proba += self.intercept_\n return proba", "def predict(self, X):\n self._check_is_fitted('predict')\n return self.best_estimator_.predict(X)", "def predict(self, X):\n if self.protos_ is None:\n raise Exception(\"Prototypes not initialized. Perform a fit first.\")\n\n X = check_array(X)\n\n # predict\n return _predict(self.protos_, self.aggregation, self.classes_, X)" ]
[ "0.7027119", "0.689992", "0.6828031", "0.67726386", "0.67685646", "0.67366797", "0.6682446", "0.6593702", "0.65868783", "0.65765643", "0.6547521", "0.6528127", "0.6527484", "0.6483978", "0.64814615", "0.64814615", "0.64814615", "0.6477165", "0.6470081", "0.643283", "0.643283", "0.64195234", "0.64140725", "0.6396958", "0.63960004", "0.6392196", "0.6388045", "0.6383118", "0.6382813", "0.63634306", "0.63566715", "0.6330006", "0.6326249", "0.6319539", "0.6319277", "0.6295676", "0.62943757", "0.6287162", "0.62862813", "0.6254909", "0.625356", "0.6251607", "0.6250309", "0.6249473", "0.6249473", "0.6249473", "0.6239481", "0.62330884", "0.6220525", "0.62198776", "0.6217728", "0.61959547", "0.6192205", "0.6189841", "0.61838907", "0.61716694", "0.61712116", "0.61426777", "0.6132371", "0.61302024", "0.6127812", "0.6125058", "0.6124959", "0.61201644", "0.61113054", "0.6109722", "0.6101911", "0.6098017", "0.6085908", "0.60840034", "0.60692954", "0.6063102", "0.60539013", "0.60507953", "0.6049791", "0.6047511", "0.60438716", "0.6043211", "0.60375434", "0.6034467", "0.60342187", "0.60317117", "0.60312253", "0.60223323", "0.60173804", "0.6015634", "0.60124063", "0.6010229", "0.60087085", "0.6007075", "0.6000696", "0.59983784", "0.5997584", "0.59944737", "0.5993977", "0.5991639", "0.5990959", "0.5990565", "0.59860325", "0.5985155" ]
0.63079476
35
Fits the AutoML model with data
def _fit(self, X, y, sample_weight=None, cv=None, sensitive_features=None): if self._fit_level == "finished": print( "This model has already been fitted. You can use predict methods or select a new 'results_path' for a new a 'fit()'." ) return # Validate input and build dataframes X, y, sample_weight, sensitive_features = self._build_dataframe( X, y, sample_weight, sensitive_features ) self.n_rows_in_ = X.shape[0] self.n_features_in_ = X.shape[1] self.n_classes = len(np.unique(y[~pd.isnull(y)])) # Get attributes (__init__ params) self._mode = self._get_mode() self._ml_task = self._get_ml_task() self._results_path = self._get_results_path() self._total_time_limit = self._get_total_time_limit() self._model_time_limit = self._get_model_time_limit() self._algorithms = self._get_algorithms() self._train_ensemble = self._get_train_ensemble() self._stack_models = self._get_stack_models() self._eval_metric = self._get_eval_metric() self._validation_strategy = self._get_validation_strategy() self._verbose = self._get_verbose() self._explain_level = self._get_explain_level() self._golden_features = self._get_golden_features() self._features_selection = self._get_features_selection() self._start_random_models = self._get_start_random_models() self._hill_climbing_steps = self._get_hill_climbing_steps() self._top_models_to_improve = self._get_top_models_to_improve() self._boost_on_errors = self._get_boost_on_errors() self._kmeans_features = self._get_kmeans_features() self._mix_encoding = self._get_mix_encoding() self._max_single_prediction_time = self._get_max_single_prediction_time() self._optuna_time_budget = self._get_optuna_time_budget() self._optuna_init_params = self._get_optuna_init_params() self._optuna_verbose = self._get_optuna_verbose() self._n_jobs = self._get_n_jobs() self._random_state = self._get_random_state() if sensitive_features is not None: self._fairness_metric = self._get_fairness_metric() self._fairness_threshold = self._get_fairness_threshold() self._privileged_groups = self._get_privileged_groups() self._underprivileged_groups = self._get_underprivileged_groups() self._adjust_validation = False self._apply_constraints() if not self._adjust_validation: # if there is no validation adjustement # then we can apply stack_models constraints immediately # if there is validation adjustement # then we will apply contraints after the adjustement self._apply_constraints_stack_models() try: self.load_progress() if self._fit_level == "finished": print( "This model has already been fitted. You can use predict methods or select a new 'results_path' for a new 'fit()'." ) return self._check_can_load() self.verbose_print(f"AutoML directory: {self._results_path}") if self._mode == "Optuna": ttl = int(len(self._algorithms) * self._optuna_time_budget) self.verbose_print("Expected computing time:") self.verbose_print( f"Time for tuning with Optuna: len(algorithms) * optuna_time_budget = {int(len(self._algorithms) * self._optuna_time_budget)} seconds" ) self.verbose_print( f"There is no time limit for ML model training after Optuna tuning (total_time_limit parameter is ignored)." 
) self.verbose_print( f"The task is {self._ml_task} with evaluation metric {self._eval_metric}" ) self.verbose_print(f"AutoML will use algorithms: {self._algorithms}") if self._stack_models: self.verbose_print("AutoML will stack models") if self._train_ensemble: self.verbose_print("AutoML will ensemble available models") self._start_time = time.time() if self._time_ctrl is not None: self._start_time -= self._time_ctrl.already_spend() # Automatic Exloratory Data Analysis # I disabled EDA, because it won't be supported # I recomend use pandas_profiling or Sweetviz # if self._explain_level == 2: # EDA.compute(X, y, os.path.join(self._results_path, "EDA")) # Save data self._save_data( X.copy(deep=False), y.copy(deep=False), None if sample_weight is None else sample_weight.copy(deep=False), cv, None if sensitive_features is None else sensitive_features.copy(deep=False), ) tuner = MljarTuner( self._get_tuner_params( self._start_random_models, self._hill_climbing_steps, self._top_models_to_improve, ), self._algorithms, self._ml_task, self._eval_metric, self._validation_strategy, self._explain_level, self._data_info, self._golden_features, self._features_selection, self._train_ensemble, self._stack_models, self._adjust_validation, self._boost_on_errors, self._kmeans_features, self._mix_encoding, self._optuna_time_budget, self._optuna_init_params, self._optuna_verbose, self._n_jobs, self._random_state, self._fairness_metric, self._fairness_threshold, self._privileged_groups, self._underprivileged_groups, ) self.tuner = tuner steps = tuner.steps() self.verbose_print( f'AutoML steps: {[s for s in steps if "update_" not in s]}' ) if self._time_ctrl is None: self._time_ctrl = TimeController( self._start_time, self._total_time_limit, self._model_time_limit, steps, self._algorithms, ) self._time_ctrl.log_time( "prepare_data", "prepare_data", "prepare_data", time.time() - self._start_time, ) for step in steps: self._fit_level = step start = time.time() # self._time_start[step] = start if step in ["stack", "ensemble_stacked"] and not self._stack_models: continue if step == "stack": self.prepare_for_stacking() if "hill_climbing" in step or step in ["ensemble", "stack"]: if len(self._models) == 0: raise AutoMLException( "No models produced. \nPlease check your data or" " submit a Github issue at https://github.com/mljar/mljar-supervised/issues/new." ) generated_params = [] if step in self._all_params: generated_params = self._all_params[step] else: generated_params = tuner.generate_params( step, self._models, self._results_path, self._stacked_models, self._total_time_limit, ) if generated_params is None or not generated_params: if "_update_" not in step: self.verbose_print( f"Skip {step} because no parameters were generated." 
) continue if generated_params: if not self._time_ctrl.enough_time_for_step(self._fit_level): self.verbose_print(f"Skip {step} because of the time limit.") continue else: model_str = "models" if len(generated_params) > 1 else "model" self.verbose_print( f"* Step {step} will try to check up to {len(generated_params)} {model_str}" ) for params in generated_params: if params.get("status", "") in ["trained", "skipped", "error"]: self.verbose_print(f"{params['name']}: {params['status']}.") continue try: trained = False if "ensemble" in step: trained = self.ensemble_step( is_stacked=params["is_stacked"] ) else: trained = self.train_model(params) params["status"] = "trained" if trained else "skipped" params["final_loss"] = self._models[-1].get_final_loss() params["train_time"] = self._models[-1].get_train_time() if ( self._adjust_validation and len(self._models) == 1 and step == "adjust_validation" ): self._set_adjusted_validation() except NotTrainedException as e: params["status"] = "error" self.verbose_print( params.get("name") + " not trained. " + str(e) ) except Exception as e: import traceback self._update_errors_report( params.get("name"), str(e) + "\n" + traceback.format_exc() ) params["status"] = "error" self.save_progress(step, generated_params) if not self._models: raise AutoMLException("No models produced.") self._fit_level = "finished" self.save_progress() self.select_and_save_best(show_warnings=True) self.verbose_print( f"AutoML fit time: {np.round(time.time() - self._start_time,2)} seconds" ) self.verbose_print(f"AutoML best model: {self._best_model.get_name()}") if self._fairness_metric is not None: # check if we have fair model has_fair_model = False for m in self._models: if m.is_fair(): has_fair_model = True break if not has_fair_model: self.verbose_print( "AutoML can't construct model that meets your fairness criteria." ) self.verbose_print("What you can do?") self.verbose_print( "1. Please include more samples that are not biased." ) self.verbose_print( "2. Please examine the most unfairly treated samples." ) self.verbose_print("3. Please change fairness threshold.") except Exception as e: raise e return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model():", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def build(self):\n labelled_documents = self.get_labelled_documents_queryset()\n\n self.model = self.build_model(labelled_documents)\n self.save_model()", "def build_model(self):\n pass", "def build_model(self):\n pass", "def initialize_model(self):\n pass", "def init_model(self):\n pass", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def load_model(self) -> Any:", "def initialize(self) -> None:\n self.model = load(self.path)", "def build_model(self):\n raise NotImplementedError", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = 
kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying 
PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def MakeModel(self):\n pass", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def build_model_fn(self):", "def learn_models(self):\n\n influencers = self.influencers.infGroup\n\n self.complete_model = LanguageModel()\n self.influencer_models = { influencer: LanguageModel() for influencer in influencers }\n\n all_tweets = []\n # for influencer in tqdm(influencers, desc='Learning Models'):\n for influencer in influencers:\n tweets = [tweet for tweet in self.get_saved_tweets(influencer)]\n self.influencer_models[influencer].add_documents(tweets)\n all_tweets += tweets\n\n self.complete_model.add_documents(all_tweets)", "def train(self, data):\n pass", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def eval(self):\n self.train(mode=False)", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def __train_model(self):\n for i in range(self.file_index):\n logger.info(\"Training the ALS model dataset \" + str(i))\n self.als = ALS(maxIter=5, regParam=0.01, userCol=\"UserId\", itemCol=\"GameId\", ratingCol=\"Userscore\",\n coldStartStrategy=\"drop\")\n self.model[i] = self.als.fit(self.df[i])\n logger.info(\"ALS model built!\")", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def train(self, ):\n raise NotImplementedError", "def training(self):\n 
self.model.fit(self.train_x, self.train_y)", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def initialize_model(self, initial_data):\n # EDIT THIS METHOD TO RETURN A MINIMAX MODEL ###\n return None", "def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def init_training(self):\n\n if not os.path.exists(self._model_root_path):\n os.makedirs(self._model_root_path)\n\n # Only initialize once!\n if self._model is None:\n self._model = TrainableAimbotModel(self._config, self._fov,\n os.path.join(self._model_root_path, 'aimbot_model.tf'))\n\n if not os.path.isfile(self._train_data_tfrecord_path) and not os.path.isfile(self._test_data_tfrecord_path):\n # Only create if not existing\n images_labels = _get_annotations_and_images(self._image_path)\n images_labels_train, images_labels_test = train_test_split(images_labels, shuffle=True, test_size=0.20)\n\n self._model.create_tfrecords(self._train_data_tfrecord_path, images_labels_train)\n self._model.create_tfrecords(self._test_data_tfrecord_path, images_labels_test)\n\n self._train_data_set = self._model.create_dataset(self._train_data_tfrecord_path, augment=True, shuffle=True)\n self._test_data_set = self._model.create_dataset(self._train_data_tfrecord_path)", "def load_model(self):\n pass", "def create_models( self ):", "def _setupModel(self, parameters):\r\n ModelFitterCore.setupModel(self.roadrunnerModel, parameters,\r\n logger=self.logger)", "def train(self) -> Any:\n pass", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def load_model(self):\n self._logger.debug(f\"Loading Spacy Data Model : {self._model}... 
Could take time.\")\n self._nlp = spacy.load(self._model)\n self._logger.debug(\"Successfully loaded Spacy Data !\")\n\n # === Load entities ===\n if PIPE_ENTITY not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_ENTITY, last=True)\n\n entity_pipe = self._nlp.get_pipe(PIPE_ENTITY)\n for entity in self._entities:\n entity_pipe.add_label(entity)\n\n # === Load categories ===\n if PIPE_INTENT not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_INTENT, last=True)\n\n intent_pipe = self._nlp.get_pipe(PIPE_INTENT)\n for intent in self._intents:\n intent_pipe.add_label(intent)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def trainModel( self, featureTrain, classTrain):", "def __init__(self, model: Any, data: Iterable[Any]):\n super().__init__(model, data)\n\n self.__score = None\n self.__summary = None\n\n self.calculate_scores()", "def set_model(self, model):\n if isinstance(model, edmx.Document):\n doc = model\n model = model.root\n elif isinstance(model, edmx.Edmx):\n # create a document to hold the model\n doc = edmx.Document(root=model)\n else:\n raise TypeError(\"Edmx document or instance required for model\")\n # update the base URI of the metadata document to identify this service\n doc.set_base(self.service_root)\n if self.model:\n # get rid of the old model\n for c in self.ws.Collection:\n c.detach_from_doc()\n c.parent = None\n self.ws.Collection = []\n for s in model.DataServices.Schema:\n for container in s.EntityContainer:\n if container.is_default_entity_container():\n prefix = \"\"\n else:\n prefix = container.name + \".\"\n # define one feed for each entity set, prefixed with the name\n # of the entity set\n for es in container.EntitySet:\n feed = self.ws.add_child(app.Collection)\n feed.href = prefix + es.name\n feed.add_child(atom.Title).set_value(prefix + es.name)\n # update the locations following SetBase above\n es.set_location()\n self.model = model", "def buildModel( self, transformer, classifier ):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , self.modeldump )", "def _initialize_model(self):\n max_value = self.data.max()\n\n if self.model_type == self._GAUSSIAN2D:\n model = models.Gaussian2D(\n x_mean=self.x, y_mean=self.y, x_stddev=1, y_stddev=1\n )\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.x_stddev.bounds = (0, self._box / 4)\n model.y_stddev.bounds = (0, self._box / 4)\n model.x_mean.bounds = (self.x - 5, self.x + 5)\n model.y_mean.bounds = (self.y - 5, self.y + 5)\n\n elif self.model_type == self._MOFFAT2D:\n model = models.Moffat2D()\n model.x_0 = self.x\n model.y_0 = self.y\n model.gamma = 2\n model.alpha = 2\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.alpha.bounds = (1, 6)\n model.gamma.bounds = (0, self._box / 4)\n model.x_0.bounds = (self.x - 5, self.x + 5)\n model.y_0.bounds = (self.y - 5, self.y + 5)\n\n model += models.Const2D(self.fit_sky())\n model.amplitude_1.fixed = True\n return model", "def __init__(self, model: Model1D):\n self._model = model", "def initialize(self, model):\n pass", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": 
defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def trainModels():\n\n # load actives from ChEMBL\n actives = {}\n if not os.path.exists(DATA_FOLDER_PATH):\n os.mkdir(DATA_FOLDER_PATH)\n actives_file = [x for x in os.listdir(DATA_FOLDER_PATH) if x.startswith('actives_chembl') and x.endswith('.p')]\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n actives = chembl.loadChEMBLData(ACCESSION, IC_50_THRESHOLD, DATA_FOLDER_PATH)\n else:\n actives = pickle.load(open(DATA_FOLDER_PATH + actives_file[0], 'rb'))\n\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n chembl.computeConsensualIC50(actives, DATA_FOLDER_PATH)\n chembl.appendRDKitMols(actives, DATA_FOLDER_PATH)\n\n # load decoys downloaded from DUD\n decoys = {}\n if os.path.exists(DECOYS_SDF_FILE_PATH[:-4] + \".p\"):\n decoys = pickle.load(open(DECOYS_SDF_FILE_PATH[:-4] + \".p\", 'rb'))\n else:\n if os.path.exists(DECOYS_SDF_FILE_PATH):\n decoys = dud.getDecoys(DECOYS_SDF_FILE_PATH)\n else:\n print \"Decoys not found in: \" + DECOYS_SDF_FILE_PATH\n print \"Make sure you set the right path.\"\n exit()\n\n # merge both data sets\n compounds_all = {}\n compounds_all.update(actives)\n compounds_all.update(decoys)\n\n # compute Morgan fingerprints\n if os.path.exists(MERGED_DATASET_PATH) and not RELOAD_DATA:\n print \"Loading previously created dataset...\"\n compounds_all = pickle.load(open(MERGED_DATASET_PATH, 'rb'))\n else:\n fingerprinter.appendMorganFingerprints(compounds_all)\n\n actives = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if 
compounds_all[cmpndid]['active']}\n pickle.dump(actives, open(ACTIVES_DUMP, 'wb'))\n decoys = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if not compounds_all[cmpndid]['active']}\n\n # train and cross-validate multiple Naive Bayes Classifiers\n classification_results = dict()\n if not os.path.exists(CLASS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n classification_results = classification.naiveBayesClassifierTraining(compounds_all)\n print \"Saving results...\"\n pickle.dump(classification_results, open(CLASS_RESULTS_SAVE_FILE_PATH, 'wb'))\n print \"Finished analysis.\"\n else:\n print \"Loading previous results...\"\n classification_results = pickle.load(open(CLASS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n # have fun with the classification results\n print \"# CLASSIFICATION STATISTICS #\"\n classification.playWithResults(classification_results)\n\n # cluster actives according to their similarity and keep only the diverse molecules\n actives_testset = dict()\n if CLUSTER:\n clusters = utilities.clusterMols(actives)\n actives_kept = dict()\n for cluster in clusters:\n actives_kept[cluster[0]] = actives[cluster[0]]\n remains = cluster[1:]\n actives_filtered_out = {chmblid : actives[chmblid] for chmblid in remains}\n actives_testset.update(actives_filtered_out)\n actives = actives_kept\n\n # estimate maximum distances between active molecules to set threshold for the application domain\n # distance_actives = regression.estimateDistanceThreshold(actives) # median of distances between two actives\n # min_distance_decoys, max_distance_decoys = regression.compareDistances(actives, decoys) # average min/max distance of closest/farthest decoy from any of the actives\n # print \"median of distances between two actives: \" + str(distance_actives)\n # print \"average min/max distance of closest/farthest decoy from any of the actives: \" + str(min_distance_decoys) + \"/\" + str(max_distance_decoys)\n\n # Support vector regression\n regression_results = dict()\n if not os.path.exists(REGRESS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n regression_results = regression.supportVectorRegression(actives)\n pickle.dump(regression_results, open(REGRESS_RESULTS_SAVE_FILE_PATH, 'wb'))\n else:\n regression_results = pickle.load(open(REGRESS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n\n # do something with the regression results\n print \"# REGRESSION STATISTICS #\"\n regression.playWithResults(regression_results, decoys, actives_testset)\n\n return classification_results['final_model'], regression_results['final_model']", "def _build_model(self):\n raise NotImplementedError()", "def prepareData(self):\n\t\tprint ('')\n\t\tfrom keras.preprocessing.sequence import pad_sequences\n\t\tfrom sklearn.model_selection import train_test_split\n\t\tfrom keras.utils import to_categorical\n\t\timport numpy as np\n\n\t\tfrom sklearn.preprocessing import LabelBinarizer, LabelEncoder\n\n\t\tX_snt = [[self.word2idx[w] if w in self.word2idx else self.word2idx[self.word_unk_token] for w in s] for s in self.x_document]\n\t\ty_tag = [[self.tag2idx[t]] for t in self.y_document]\n\n\t\tX_snt = pad_sequences(maxlen=self.parameters['max_doc_len'], sequences=X_snt, padding='post', value=self.word2idx[self.word_pad_token])\n\t\ty_tag = to_categorical(y_tag, self.tags_len)\n\n\t\tprint (\"\\tRandom:\\t\", self.random)\n\t\tprint (\"\\tTest size:\\t\", self.split_train_test)\n\n\t\tself.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X_snt, y_tag, test_size=self.split_train_test, 
random_state=self.random)\n\n\t\tself.X_train = np.array(self.X_train)\n\t\tself.X_test = np.array(self.X_test)\n\t\tself.y_train = np.array(self.y_train)\n\t\tself.y_test = np.array(self.y_test)\n\n\t\tprint ('\\n\\tWords: {}\\t{}'.format(self.X_train.shape, self.X_test.shape) )\n\t\tprint ('\\tTags: {}\\t{}\\n'.format(self.y_train.shape, self.y_test.shape))", "def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))", "def auto_ml():\r\n # Reading from file\r\n my_data = my_reader(config.filename, separ=config.file_separ)\r\n\r\n # Binary and Unary columns search\r\n is_binary_list = is_binary(my_data)\r\n is_unary_list = is_unary(my_data)\r\n\r\n # Time columns search\r\n is_time_list = is_time(my_data)\r\n\r\n # To dummy\r\n my_data = to_dummies(my_data)\r\n\r\n # Train-test split\r\n train_df, test_df = \\\r\n my_train_test_split(my_data, act_test_size=config.test_size)\r\n\r\n # Pure numbers will be the input variables\r\n input_vars = to_pure_numbers(my_data)\r\n\r\n # Choosing if it is a regression or classification\r\n global regression, classification\r\n regression, classification = guess_goal(my_data, config.target)\r\n\r\n # Modelling and building the pipeline\r\n n_neighbors = 15\r\n x_df = train_df[input_vars]\r\n if regression:\r\n pipe_1 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', LinearRegression(fit_intercept=True))])\r\n pipe_2 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model',\r\n neighbors.KNeighborsRegressor(n_neighbors,\r\n weights='distance'))])\r\n pipe_3 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.BayesianRidge())])\r\n pipe_4 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.SGDRegressor())])\r\n pipe_5 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.ElasticNet())])\r\n pipe_6 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.Ridge())])\r\n pipe_7 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.Lasso())])\r\n pipe_8 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', 
DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', RandomForestRegressor(max_depth=2,\r\n random_state=0,\r\n n_estimators=100))])\r\n pipe_dict = {0: 'LinearRegression',\r\n 1: 'KNeighborsRegressor',\r\n 2: 'BayesianRidge',\r\n 3: 'SGDRegressor',\r\n 4: 'ElasticNet',\r\n 5: 'Ridge',\r\n 6: 'Lasso',\r\n 7: 'RandomForestRegressor'}\r\n\r\n if classification:\r\n pipe_1 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', LogisticRegression(random_state=42))])\r\n pipe_2 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model',\r\n neighbors.KNeighborsClassifier(n_neighbors))])\r\n pipe_3 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', RandomForestClassifier(n_estimators=100,\r\n max_depth=2,\r\n random_state=0))])\r\n pipe_4 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.SGDClassifier())])\r\n pipe_5 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', MLPClassifier())])\r\n pipe_6 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', GradientBoostingClassifier())])\r\n pipe_7 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', GaussianNB())])\r\n pipe_8 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', SVC(gamma='auto'))])\r\n pipe_dict = {0: 'LogisticRegression',\r\n 1: 'KNeighborsClassifier',\r\n 2: 'RandomForestClassifier',\r\n 3: 'SGDClassifier',\r\n 4: 'MLPClassifier',\r\n 5: 'GradientBoostingClassifier',\r\n 6: 'GaussianNB',\r\n 7: 'SVC'}\r\n\r\n # List of pipelines\r\n pipelines = [pipe_1, pipe_2, pipe_3, pipe_4, pipe_5, pipe_6, pipe_7, pipe_8]\r\n\r\n # Fit the pipelines\r\n for pipe in pipelines:\r\n pipe.fit(x_df, train_df[config.target])\r\n\r\n # Is there outlier\r\n outlier_bool = is_outlier(x_df)\r\n\r\n corr_df = x_df.corr()\r\n\r\n # Open new file\r\n result_path = './test_eval/Result_params_' +\\\r\n str(config.filename.split(\"/\")[-1].split(\".\")[0]) + '.txt'\r\n result_file = open(result_path, 'w')\r\n result_file.write(\"Filename: \" + str(config.filename) + '\\n')\r\n result_file.write(\"Target: \" + str(config.target) + '\\n')\r\n if regression:\r\n result_file.write(\"Prediction type: Regression\" + '\\n')\r\n else:\r\n result_file.write(\"Prediction type: Classification\" + '\\n')\r\n 
result_file.write(\"Test size: \" + str(config.test_size*100) + \"%\" + '\\n')\r\n result_file.write(\"Model input columns: \" + str(input_vars) + '\\n')\r\n result_file.write(\"Used preparations: \" + '\\n')\r\n if config.missing_bool:\r\n result_file.write(\"Missing value handle (\" +\r\n str(config. missing_value_handle) +\r\n \"), \")\r\n if config.min_scaler_bool:\r\n result_file.write(\"Min scaling, \")\r\n if config.standardize_bool:\r\n result_file.write(\"Standardize, \")\r\n if config.to_dummies:\r\n result_file.write(\"To dummies\")\r\n result_file.write('\\n' + \"Discretize columns: \" +\r\n str(config.discretize) + '\\n')\r\n result_file.write(\"Binary columns: \" + str(is_binary_list) + '\\n')\r\n result_file.write(\"Unary columns: \" + str(is_unary_list) + '\\n')\r\n result_file.write(\"Time columns: \" + str(is_time_list) + '\\n')\r\n if outlier_bool:\r\n result_file.write(\"There is outlier in the data.\" + '\\n')\r\n\r\n # Evaluation\r\n result_df = pd.DataFrame()\r\n result_cols = []\r\n for idx, val in enumerate(pipelines):\r\n result_df = pd.concat([result_df,\r\n my_evaluation(val.predict(test_df[input_vars]),\r\n test_df[config.target])])\r\n result_cols.append(pipe_dict[idx])\r\n\r\n result_df.index = result_cols\r\n result_file.close()\r\n\r\n with pd.ExcelWriter(\"./test_eval/Evaluation_\"\r\n + str(config.filename.split(\"/\")[-1].split(\".\")[0])\r\n + \".xlsx\") as writer:\r\n if regression:\r\n result_df.to_excel(writer, sheet_name=\"Regression\")\r\n else:\r\n result_df.to_excel(writer, sheet_name=\"Classification\")\r\n corr_df.to_excel(writer, sheet_name=\"Correlation\")", "def convert_to_model(self, *args):", "def train(self):\n\t\traise NotImplementedError", "def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token list -- use vocabulary of the training dataset\n # self.term_list = self.vocabulary\n # info(\"Setting bag dimension to {} from input vocabulary.\".format(len(self.term_list)))\n # will generate the vocabulary from the input\n pass\n info(f\"Building {self.name} model\")\n bagger = None\n if self.config.max_terms is not None:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range, max_terms=self.config.max_terms)\n else:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n train_idx = self.indices.get_train_instances()\n texts = Text.get_strings(self.text.data.get_slice(train_idx))\n bagger.map_collection(texts, fit=True, transform=False)\n self.term_list = bagger.get_vocabulary()\n\n self.dimension = len(self.term_list)\n self.config.dimension = self.dimension", "def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = dict(eval(self.commas_per_sentence))", "def train(self, absList, modelfilename):\n raise NotImplementedError(\"Need to implement train()\")", "def train(self, absList, 
modelFilename):\n pass", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def build_model ( self, transformer, classifier, dumpfile ) :\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , dumpfile )", "def load_model_meta(meta_path):\r\n xlsx = pd.ExcelFile(meta_path)\r\n \r\n #The zooming factor is saved in the UsedData sheet\r\n meta = pd.read_excel(xlsx,sheet_name=\"UsedData\")\r\n zoom_factor = meta[\"zoom_factor\"].iloc[0]#should images be zoomed before forwarding through neural net?\r\n\r\n meta = pd.read_excel(xlsx,sheet_name=\"Parameters\")\r\n\r\n try:\r\n model_type = meta[\"Chosen Model\"].iloc[0]#input dimensions of the model\r\n except:\r\n model_type = \"Unknown\"\r\n \r\n try:\r\n target_imsize = meta[\"Input image crop\"].iloc[0]#input dimensions of the model\r\n except:\r\n target_imsize = meta[\"Input image size\"].iloc[0]#input dimensions of the model\r\n\r\n normalization_method = meta[\"Normalization\"].iloc[0]#normalization method\r\n if normalization_method == \"StdScaling using mean and std of all training data\": \r\n mean_trainingdata = meta[\"Mean of training data used for scaling\"]\r\n std_trainingdata = meta[\"Std of training data used for scaling\"]\r\n else:\r\n mean_trainingdata = None\r\n std_trainingdata = None\r\n \r\n #Following parameters may not exist in meta files of older AID versions. 
Hence try/except\r\n\r\n #Color mode: grayscale or RGB?\r\n try:\r\n target_channels = meta[\"Color Mode\"].iloc[0]\r\n except:\r\n target_channels = \"grayscale\"\r\n if target_channels.lower() ==\"grayscale\":\r\n target_channels = 1\r\n elif target_channels.lower() ==\"rgb\":\r\n target_channels = 3\r\n\r\n #The order for the zooming operation\r\n try:\r\n zoom_interpol_method = meta[\"Zoom order\"].iloc[0]\r\n except:\r\n zoom_interpol_method = \"cv2.INTER_NEAREST\"\r\n #Translate zoom_interpol_method to OpenCV argument\r\n if \"cv2.\" not in str(zoom_interpol_method):\r\n zoom_interpol_method = zoom_arguments_scipy2cv(zoom_factor,zoom_interpol_method)\r\n\r\n #Padding mode\r\n try:\r\n padding_mode = meta[\"paddingMode\"].iloc[0]\r\n except:\r\n padding_mode = \"constant\"#cv2.BORDER_CONSTANT\r\n #translate padding_mode to OpenCV argument\r\n if \"cv2.\" not in padding_mode:\r\n padding_mode = pad_arguments_np2cv(padding_mode)\r\n\r\n #Write information in one DataFrame\r\n img_processing_settings = pd.DataFrame()\r\n img_processing_settings[\"model_type\"]=model_type,\r\n img_processing_settings[\"target_imsize\"]=target_imsize,\r\n img_processing_settings[\"target_channels\"]=target_channels,\r\n img_processing_settings[\"normalization_method\"]=normalization_method,\r\n img_processing_settings[\"mean_trainingdata\"]=mean_trainingdata,\r\n img_processing_settings[\"std_trainingdata\"]=std_trainingdata,\r\n img_processing_settings[\"zoom_factor\"]=zoom_factor,\r\n img_processing_settings[\"zoom_interpol_method\"]=zoom_interpol_method,\r\n img_processing_settings[\"padding_mode\"]=padding_mode,\r\n \r\n return img_processing_settings", "def fit(self):\n self.output_data = np.array([])\n self.mapper_data = np.array\n return self", "def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()", "def train(self):\n raise NotImplementedError", "def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()", "def SetModel( self, loader ):\n self.loader = loader\n self.adapter,tree,rows = self.RootNode( )\n self.listControl.integrateRecords( rows.values())\n self.activated_node = tree\n self.squareMap.SetModel( tree, self.adapter )\n self.RecordHistory()", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def set_train(self):\n self.model.train()", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, 
lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def create_model(self):\n pass", "def create_model(self):\n pass", "def set_train(self):\n for m in self.models.values():\n m.train()", "def main():\n\n from pyanno.modelBt_loopdesign import ModelBtLoopDesign\n model = ModelBtLoopDesign.create_initial_state(5)\n annotations = model.generate_annotations(2)\n\n anno = AnnotationsContainer.from_array(annotations, name='blah')\n model_view = AnnotationsView(annotations_container=anno, model=HasTraits())\n model_view.configure_traits()\n return model, annotations, model_view", "def train(self, absList, modelFilename):\n pass", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def train(self)->None:", "def _load_training_data(self):\n self._save_training_data()", "def _train_model(self):\n raise NotImplementedError()", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b" ]
[ "0.6559245", "0.61642045", "0.6133834", "0.60486835", "0.60486835", "0.59964865", "0.59934956", "0.5981591", "0.5929513", "0.58844835", "0.5876345", "0.5875687", "0.58711773", "0.5865653", "0.5842758", "0.5839361", "0.58370984", "0.58153534", "0.5811744", "0.58106077", "0.58106077", "0.58004713", "0.5781036", "0.5780701", "0.5780701", "0.5780701", "0.5780701", "0.5780701", "0.5769261", "0.5768432", "0.57606715", "0.57606715", "0.57606715", "0.57606715", "0.57606715", "0.575196", "0.5744262", "0.57329935", "0.57322365", "0.5731592", "0.57238334", "0.57212555", "0.57193375", "0.5719271", "0.57116884", "0.5703414", "0.56983495", "0.56847876", "0.56839", "0.56673586", "0.56621116", "0.5661651", "0.56600815", "0.56575304", "0.5647455", "0.5641829", "0.5641242", "0.56389934", "0.56374264", "0.56352705", "0.56352", "0.56352", "0.56352", "0.56352", "0.56352", "0.5635055", "0.5628739", "0.56269467", "0.56248116", "0.5619648", "0.5618551", "0.5617756", "0.56094664", "0.5603652", "0.55976444", "0.55950475", "0.5590941", "0.5574098", "0.55668527", "0.55654734", "0.55649525", "0.556465", "0.5564135", "0.5561499", "0.5559971", "0.5555532", "0.55552477", "0.555297", "0.5550434", "0.5550434", "0.55446607", "0.55446607", "0.55408406", "0.55395114", "0.5539338", "0.5531804", "0.55300057", "0.5519693", "0.55171436", "0.551308", "0.5511958" ]
0.0
-1
Append error message to errors.md file.
def _update_errors_report(self, model_name, error_msg): errors_filename = os.path.join(self._get_results_path(), "errors.md") with open(errors_filename, "a") as fout: self.verbose_print(f"There was an error during {model_name} training.") self.verbose_print(f"Please check {errors_filename} for details.") fout.write(f"## Error for {model_name}\n\n") fout.write(error_msg) link = "https://github.com/mljar/mljar-supervised/issues/new" fout.write( f"\n\nPlease set a GitHub issue with above error message at: {link}" ) fout.write("\n\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_error(self, msg):\n self._add_message(msg, self._errors)", "def add_error(self, reference_id, error):\n\n with open('runReport.txt', 'a') as report:\n try:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id + \": \" + error)\n except Exception:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id)", "def add_error(self, message):\n self.errors.append(message)", "def add_error(self, content):\n self._add_content(html_error(content))", "def append_error(self, msg):\n if msg.startswith(IGNORE_PREFIX):\n misc.cdblogv(misc.kLogErr, 0, \"bomcreator: error message cannot be ignored (%s)\" % msg)\n msg = msg[len(IGNORE_PREFIX):]\n self._messages.append((msg, 'alert-error'))\n misc.cdblogv(misc.kLogErr, 0, \"bomcreator error hint: \" + msg)\n self._hasError = True", "def add_error(\n self,\n message: str,\n position: Optional[Tuple[int, int]] = None,\n headline: Optional[Headline] = None,\n word: Optional[Word] = None,\n ) -> None:\n start: int = 0\n end: int = 0\n\n if position:\n start, end = position\n elif headline:\n start, end = self.report.get_headline_position(headline)\n elif word:\n start, end = self.report.get_word_postion(word)\n\n self.errors.append({\"message\": message, \"start\": start, \"end\": end})", "def add(self, message: str) -> None:\n self.errors.append(message.strip())", "def add_error(*msg):\n\n global errors\n errors.append(''.join(msg))", "async def add_error(self, ctx, error):\n embed: Embed = settings.get_ticket_error_embed()\n\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n print(error)\n if isinstance(error, commands.MissingRequiredArgument):\n embed.description = f\"\\nUse **!add <user>**\"\n elif isinstance(error, commands.BadArgument):\n embed.description = f\"\\nUser not found.\"\n else:\n embed.description = f\"\\nYou don't have permissions for executing this command.\"\n\n await ctx.send(embed=embed)", "async def gen_error(error_id: str, ctx: commands.Context) -> Embed:\n errors = get_file(\"errors\")\n error = Embed(color=error_color)\n error.add_field(name=\"⚠️ \" + errors[error_id][\"title\"], value=errors[error_id]['txt'])\n error = set_footer(error, ctx)\n await ctx.send(embed=error)", "def add_error(self, err_msg):\n assert err_msg is not None, 'err_msg cannot be None'\n\n self.error_found = True\n self.error_message = err_msg.strip()", "def add_error(self, path, error):\n self.errors = merge_errors(self.errors, self._make_error(path, error))", "def add_error(self, error):\n self.errors.append(error)", "def _insertErrorMsg(self, ErrorMessage, outputFileObject):\n outputFileObject.write('<font color=\"' + AutoGrader.Const.ERROR_COLOR + '\">')\n outputFileObject.write (ErrorMessage)\n outputFileObject.write('</font>')", "def add_error(self, u_file: UserFile, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:", "def add_errdir(self):\n os.rename(self.rundir[-1], self.rundir[-1] + \"_err.\" + str(len(self.errdir)))\n self.update_errdir()", "def add_error_entry(title, description):\n global data_output\n\n data_output.append({\n 'title': title,\n 'value': description,\n 'color': fg(\"grey_30\")\n })", "def initialize_error_summary() -> str:\n error_summary = '\\nSummary of <span class=\"tex-fatal\">Critical Errors:</span>\\n\\n<ul>\\n'\n return error_summary", "def error(self, msg):\n if self.current_line and self.current_file:\n msg = '{}\\nError in {} line {}'.format(\n msg, self.current_file, self.current_line)\n return 
self.DirectiveError(msg)", "def _reportErrorMsg(self, ErrorMessage, outputFile):\n f=self.openFile(outputFile, \"a\") #open otuputFile for appending\n self._insertErrorMsg(ErrorMessage, f)\n f.close()", "def write_error_summary(error):\n fullpath = request.environ.get('FULLPATH', request.path)\n uid = c.user._id if c.user_is_loggedin else '-'\n g.log.error(\"E: %s U: %s FP: %s\", error, uid, fullpath)", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def error(msg):\n sys.stdout.write('%s[ ERROR ]%s %s\\n' % (colors.RED, colors.RESET, msg))", "def AppendErrorMessage(self, error_message):\n self._test_results[self._output_volume_index] = False\n self._test_message.append(\n 'Under output volume %r' % self._output_volumes[\n self._output_volume_index])\n self._test_message.append(error_message)\n session.console.error(error_message)", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def errormessage(self, msg) :\n\t\tif msg != self.__olderror :\n\t\t\tself.__stderr.write(\"%s\\n\" % msg)\n\t\t\tself.htmlmessage(msg)\n\t\tself.__olderror = msg[:]\n\t\treturn -1", "def err(message: str) -> None:\n filename, line = filename_line()\n\n with State.lock:\n State.stderr.write(err_as_text(filename=filename, line=line, message=message))\n State.stderr.flush()", "def add_error(self, field, message):\n add_list_value(self.errors, field, message)", "def add_error(self, u_file: UserFile, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:\n e = Error(severity=severity, path=u_file.path, code=code,\n message=msg, is_persistant=is_persistant)\n u_file.add_error(e)", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def error(self, message=None, show_help=True):", "def error_logging(filename, cloud, msg):\n with open(filename, 'a') as f:\n f.write(cloud + \" \" + msg + '\\n')\n f.write('\\n')", "def ferrmsg(msg, proc=''):\r\n\tf = open(ERROR_LOG, 'a')\r\n\tf.write(\"<%s>: %s\\n\" % (proc,msg))\r\n\tf.close()", "def err(filename, tree, elm, msgtype, msg, error = True):\n global errors, errexists, warnings, warnexists, fatals, fatalexists\n\n # Let user tune whether a warning or error\n fatal = is_enabled(elm, msgtype, dofatals, error)\n\n # By default warnings and errors are enabled, but let user tune it\n if not is_enabled(elm, msgtype, enables, True):\n return\n\n (prefix, suppr) = elm_suppr(filename, tree, elm, msgtype, True)\n if suppr in false_positives:\n # That was actually expected\n return\n if suppr in suppressions:\n # Suppressed\n suppressions[suppr] = False\n if fatal:\n fatalexists += 1\n if error:\n errexists += 1\n else:\n warnexists += 1\n return\n\n if error:\n errors += 1\n else:\n warnings += 1\n if fatal:\n fatals += 1\n\n msg = \"%s %s%s: %s%s\" % (prefix,\n \"FATAL \" if fatal else \"\",\n \"ERROR\" if error else \"WARNING\",\n elm_name(elm), msg)\n print(msg)\n if outfile is not None:\n print(msg, file=outfile)", "def error(self, message, new_line=True):\n #\n # Note that while the call to \"get_caller()\" is costly, it only happens\n # when an error occurs, so it shouldn't impact performance\n #\n error_data = 
(message, self.get_caller())\n self._errors.append(error_data)", "def add_to_error_log(message):\n f = open(Filenames.ERROR_LOG, \"a\")\n f.write((\"------------- %s --------------\\n\" % time.ctime()) + message)\n f.close()", "def collect_errors_and_warnings(self) -> str:\n # Complete error message\n message = \"----------------ERRORS----------------\\n\"\n if self.errors == \"\":\n message = \"YOUR FILE IS VALIDATED!\\n\"\n logger.info(message)\n else:\n for error in self.errors.split(\"\\n\"):\n if error != \"\":\n logger.error(error)\n message += self.errors\n if self.warnings != \"\":\n for warning in self.warnings.split(\"\\n\"):\n if warning != \"\":\n logger.warning(warning)\n message += \"-------------WARNINGS-------------\\n\" + self.warnings\n return message", "def error(self, e):\n return \"{}: {} ({})\".format(e.__class__.__name__, e.__doc__, e.message)", "def msg_err(message):\n to_stdout(\" !!! {message}\".format(message=message), colorf=red, bold=True)\n if _logger:\n _logger.error(message)", "def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)", "def error(self, msg):\n with self._lock:\n self.wraptext(msg, fg=\"red\", bold=True)\n return self", "def error(self, message):\n print message", "def err_message(self, message):\n self.errors.append(1)\n message = \"<b>\" + message + \"</b>\"\n self.timer_id = GLib.timeout_add_seconds(5, self.error_false)\n # Show if is was hidden\n if self.hidden:\n self.toggle()\n self.was_hidden = True\n self.left_label.set_markup(message)", "def message_error(self, m):\n self.message(m, logging.ERROR)", "def _report_error(self, message):\n self._logger.error(message)\n page = self._bot.get_page(self.ERROR_TITLE)\n\n current = page.text\n if current and current.split(\":\", 1)[1][1:] == message:\n # No need to double-report the same message\n return\n\n wikitime = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n page.text = \"{}: {}\".format(wikitime, message)\n page.save(\"Error while loading configuration\", minor=False)", "def addError(self, test, err):\r\n self.errors.append((test, self._exc_info_to_string(err, test)))\r\n self._mirrorOutput = True", "def set_error(errTxt):\r\n core.set_item_color(\"Start\", mvGuiCol_Button, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonActive, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonHovered, (255, 0, 0, 255))\r\n if not core.does_item_exist(\"Error##ErrorNoFACEITName\"):\r\n with simple.collapsing_header(\"Error##ErrorNoFACEITName\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"ErrorText\", default_value=errTxt, color=(255, 0, 0, 255))", "def missing_entry_error(entry, name, yml):\n\n yml = symlink_target(yml)\n output_1 = path(yml) + '\\n'\n output_2 = colored(' - Error: Missing ', 'red')\n output_3 = colored(str(entry), attrs=['bold'])\n output_4 = colored(' in ', 'red')\n output_5 = colored(str(name), attrs=['bold'])\n return output_1 + output_2 + output_3 + output_4 + output_5", "def add_error(self, error: Error):\n self.is_valid = False\n self.error.append(error)", "def error_embed(message: str, title: Optional[str] = None) -> Embed:\n title = title or random.choice(ERROR_REPLIES)\n embed = Embed(colour=Colours.soft_red, title=title)\n embed.description = message\n return embed", "def 
log_error(title, message):\n if title == \"Redundant\":\n print(f\"[{title}]: Refactoring is not necessary\")\n else:\n print(f\"[{title}]: Refactoring is not allowed\")\n print(f\"{message}\")", "def error(cls, message):\n print('[ERROR] {0}'.format(message))", "def write_error(self, reason):\n self.write_message({\n 'status': 'error',\n 'reason': reason,\n })", "def error(self, message):\n sys.stderr.write(message[0].capitalize() + message[1:] + '\\n')\n sys.stderr.write('Use \"arhc.py --help\" to view more information.\\n')\n exit()", "def error(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def err(msg):\n print(colored.red(\"[ERROR]: {0}\".format(msg)))", "def show_error(title, message, print_message=False):\n\n pass", "def error_message(self, error_message):\n\n self._error_message = error_message", "def error(self, error_str):\n # Make sure errors have the same type\n error_str = str(error_str)\n self._output_object.add_error(\n html_tag(plain_to_html(error_str), error_str, self.proc)\n )\n self.errors.append((error_str, self.proc))", "def add_error_non_file(self, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:\n self._insert_error(Error(severity=severity, path=None, code=code,\n message=msg, is_persistant=is_persistant))", "def add(self, obj, msg):\n self.errors.append((obj, msg))", "def error_message(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def error(message):\n print str(message)", "def error(self, message: str) -> None:\n\n self.__add_log(self.ERROR, message)", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def _add_error(self, key, message):\n if key not in self._error_key_list:\n self._error_key_list.append(key)\n self.add_error(key, str(message))", "def _print_error_text(self, stream, tagname, error):\n text = escape(str(error[1]))\n stream.write('%s: %s\\n' % (tagname.upper(), text))\n tb_stream = StringIO()\n traceback.print_tb(error[2], None, tb_stream)\n stream.write(escape(tb_stream.getvalue()))\n stream.write('-' * 80 + '\\n')", "def add_error_url(self, url):\n self._load_error_urls()\n if url not in self.errorurls:\n self.errorurls.add(url)\n errorurlsfile = osp.join(self.basepath, 'errors.csv')\n f = open(errorurlsfile, 'a')\n csvout = csv.writer(f, delimiter=',', quotechar='\"')\n csvout.writerow([url.encode('UTF-8')])\n f.close()", "def error(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['error']:\n self.print_lines(self.colored(('red', 'bold'), lines))", "def Write404Error(self):\n self.error(404)\n self.response.out.write(\n ''.join(['<html><head><title>404: Not Found</title></head>',\n '<body><b><h2>Error 404</h2><br/>',\n 'File not found</b></body></html>']))", "def Write404Error(self):\n self.error(404)\n self.response.out.write(\n ''.join(['<html><head><title>404: Not Found</title></head>',\n '<body><b><h2>Error 404</h2><br/>',\n 'File not found</b></body></html>']))", "def error(msg):\n click.secho(f'[ERROR] {msg}', fg='red')", "def make_error(self) -> Optional[str]:\n info = self._info\n if info is None:\n return None\n startl, endl = info.line, info.endline\n\n return \"\\n\".join(((f\"On line {startl + 1}:\"\n if startl == endl else\n f\"On lines {startl + 1} to {endl + 1}:\"),\n self.highlight_lines))", "def appendToStderr(self, txt):\n added = self.__logViewer.appendToStderr(txt)\n if added:\n self.__ui.showLogViewer()", "def save_file_error(pth):\n\n output_1 = 
colored(' - Error: Failed to save file\\n', 'red')\n output_2 = path(pth)\n return output_1 + output_2", "def log_error(self, message):\n # log the datetime+message to error_log.txt\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S \"\n \"%Y-%m-%d\")\n with open(ERROR_FILE_PATH, \"a+\") as error_file:\n error_file.write(\"{} $ {}\\n\".format(curr_time, message))", "def formatError(self,error):\n return '<font color=\"#f00\"><b><i>%s</i></b></font><br />\\n' % error", "def error_msg(self) -> str:\n return self.__error_msg", "def error(err):\n\n return str(err) + '\\n'", "def show_errors(self):\n\n if self.errors:\n print('Clean error in:')\n for file in self.errors:\n print(' %s' % file)", "def error(message: str) -> None:\n print(f\"ERROR: {message}\")", "def error(message):\n print(message, file=sys.stderr)", "def add_error_non_file(self, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:", "def error():\n return render_template(\"error.html\", **locals())", "def error(message, code=400):\n return render_template(\"error.html\", top=code, bottom=message)", "def error(msg):\n return ErrorRule(msg)", "def place_error_message(self, message):\n msg = tk.Message(self.parent, text='Error: ' + message)\n msg.config(bg='white', font=('times', 18, 'bold'))\n msg.pack()", "def setErrorFile(fname='dis.err'):\n dislin.errfil(fname)", "def error(_bot, update, error_):\n logger.warning('Update \"%s\" caused error \"%s\"', update, error_)", "def error_log(self):\n if not self._error_log_text:\n self._error_log_text = self._cat('/tmp/errors')\n return self._error_log_text", "def write_error_message(self, message: str):\n\n return sys.exit(message)", "def set_error(self, error):\n self._set_sub_text('error', text=str(error))\n return self", "def error(errornum):\n return render_template('error.html', errornum=errornum)", "def make_error( title, *args, **kwargs ):\n blocks = list()\n blocks.append( '<h1>{}</h1>'.format( title ) )\n if args:\n blocks.append( '<h4>{}</h4>'.format( args[ 0 ] ) )\n for arg in args[ 1 : ]:\n blocks.append( '<p>{}</p>'.format( arg ) )\n if kwargs:\n dl = list()\n for key, value in kwargs.items():\n dl.append( '<dt>{}</dt><dd>{}</dd>'.format( key, value ) )\n blocks.append( '<dl>\\n{}\\n</dl>'.format( '\\n'.join( dl ) ) )\n return _html.format(\n title = title,\n head = '',\n body = '\\n'.join( blocks )\n )", "def error_message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"error_message\")", "def errfile(self):\n\n return f\"{self.name}.err.out\"", "def addError(self, test, err):\n self.errors.append((proto_test(test), proto_error(err)))", "def print_error(self, msg, line_num=False, errorFunc=SystemError):\n if line_num is False: line_num = self.line_num\n bad_line_ind = self.line_nums[line_num]\n\n err_msg = \"\\n\\n\\n############ ERROR #############\\n\"\n err_msg += \"Error in input_file '%s'\\n\\n---\\n\" % self.inp_filename\n err_msg += msg.strip(\"\\n\")\n err_msg += \"\\n---\\n\\nline number: %i\\n\" % self.line_nums[line_num]\n err_msg += f\"line: '{self.file_ltxt_orig[bad_line_ind]}'\"\n err_msg += \"\\n\"\n err_msg += f\"err id: {self.E_str}\"\n err_msg += \"\\n#################################\\n\\n\"\n raise errorFunc(err_msg)" ]
[ "0.68803924", "0.68077487", "0.6791102", "0.6587171", "0.65130657", "0.6467027", "0.6418997", "0.6406481", "0.63759094", "0.6353462", "0.63198704", "0.6297876", "0.62030095", "0.6134235", "0.61272323", "0.60763824", "0.60648596", "0.605622", "0.6027484", "0.5985176", "0.598342", "0.5975521", "0.5962527", "0.5954178", "0.59461045", "0.59300107", "0.5927192", "0.58871007", "0.58852726", "0.5885242", "0.58568096", "0.5850335", "0.58432174", "0.5820495", "0.5820148", "0.58177185", "0.5802512", "0.5795362", "0.5793221", "0.57891214", "0.57820046", "0.57704043", "0.5747341", "0.5737585", "0.57030463", "0.57013255", "0.5694929", "0.56933165", "0.56792194", "0.56718594", "0.5670068", "0.5658734", "0.5653164", "0.5640223", "0.5639762", "0.5629063", "0.56151897", "0.5610398", "0.5607968", "0.5602261", "0.56017643", "0.55972713", "0.55918175", "0.5591491", "0.5586971", "0.55633956", "0.55587363", "0.55416745", "0.5538736", "0.55185384", "0.5517706", "0.55153716", "0.55153716", "0.55153066", "0.5514081", "0.55061245", "0.55022293", "0.55006385", "0.5498121", "0.5484178", "0.5483863", "0.548101", "0.5479344", "0.5478346", "0.5475496", "0.5472224", "0.54707164", "0.5468891", "0.5468298", "0.5466084", "0.5464247", "0.5463518", "0.54633325", "0.54597956", "0.5444122", "0.5440683", "0.5431537", "0.5430356", "0.5430183", "0.5429476" ]
0.7012683
0
Gets the current mode
def _get_mode(self): self._validate_mode() return deepcopy(self.mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mode(self):\r\n return self._api.get_mode()", "def getmode(self):\n return self.mode", "def get_mode(self):\r\n return self.mode", "def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n return self._currentsDict[\"currentMode\"]", "def getMode(self):\n return self._mode", "def getMode(self):\n with self.lock:\n mode = self.mode\n return mode", "def get_current_mode(self):\n return self.read(0xa2)", "def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)", "def get_mode(self, ):\n return self.get_parameter('mode')", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def mode(self):\n return self._mode", "def mode(self):\n return self._mode", "def mode(self):\n return self._mode", "def mode(self):\n return self._data.get('mode', None)", "def mode(self):\n return self._lift(\"mode\")", "def mode(self):\n return self.__mode", "def _get_mode():\n return context.get_context('mode')", "def mode(self):\n\n return self._mode", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Mode:\n return self._mode", "def _get_mode(self):\n raise NotImplementedError", "def mode(self) -> str:\r\n return self._mode", "def mode(self) -> Optional[str]:\n for mode in self._modes:\n if mode.active:\n return mode.name\n return None", "def mode(self):\n return self._mode_func", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> int:\n return self._mode", "def mode(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Optional[pulumi.Input[Union[str, 'Mode']]]:\n return pulumi.get(self, \"mode\")", "def mode(self):\r\n pass", "def mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Mode:\n ...", "def _get_modes(self):\n return self.__modes", "def mode(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"mode\")", "def game_mode(self):\n return self._get(\"game_mode\")", "def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'FIX': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def modeId(self):\n return self.__modeId", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def mode(self) -> str:\r\n ...", "def get_window_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetWindowMode', self.handle)", "def get_app_mode(self):\n\t\treturn call_sdk_function('PrlApi_GetAppMode')", "def 
get_mode(self):\n self.read(\":FUNC?\")", "def last_on_mode(self):\n return self._last_on_mode", "def getMode(self):\r\n # ViStatus status = AcqrsD1_getMode(ViSession instrumentID,\r\n # ViInt32* mode, ViInt32* modifier, ViInt32* flags)\r\n mode = ViInt32()\r\n modifier = ViInt32()\r\n flags = ViInt32()\r\n self.callFunc('AcqrsD1_getMode', self.session,\r\n byref(mode), byref(modifier), byref(flags))\r\n return (mode.value, modifier.value, flags.value)", "def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]", "def getMode(self, modeName = None):\n\t\tif modeName not in self.modes:\n\t\t\tif modeName == None:\n\t\t\t\traise Exception(\"Get schema '%s' error\" % self.name)\n\t\t\telse:\n\t\t\t\traise Exception(\"Get schema '%s' with mode name '%s' error\" % (self.name, str(modeName)))\n\t\treturn self.modes.get(modeName)", "def get_current_mode() -> ThemeMode:\n try:\n # test:\n assert False\n # todo: Check platform and add more platforms.\n with winreg.OpenKey(\n winreg.HKEY_CURRENT_USER,\n (r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Themes'\n r'\\Personalize'),\n access=winreg.KEY_READ) as hkey:\n return (ThemeMode.light\n if winreg.QueryValueEx(hkey, 'AppsUseLightTheme')[0]\n else ThemeMode.dark)\n except Exception:\n return get_current_app_mode()", "def get_current_eq_mode(self):\n response = self.get(COMMAND_UIC, 'GetCurrentEQMode')\n\n return response", "def mode(self):\n if \"mode\" in self.recipe:\n return self.recipe[\"mode\"]\n else:\n raise ValueError(\"No mode defined for recipe {}!\".format(self))", "def get_mode(self, port):\n port = int(port)\n self._validate_port(\"get_mode\", port)\n flags = self._regex_shell_fn(\n self._command_dict[\"GET_MODE\"].format(port),\n self._regex_dict[\"GET_MODE_REGEX\"],\n tries=5)\n\n if \"O\" in flags:\n mode = OFF\n elif \"S\" in flags:\n mode = SYNC\n else:\n mode = CHARGE\n return mode", "def dev_mode(self):\r\n return self._dev_mode", "def get_focus_mode(self):\n if self.active_focus_mode:\n return self.active_focus_mode.lower()", "def mode(self):\n return self._wepy_mode", "def current_fan_mode(self):\n return self._current_fan_mode", "def mode(self) -> int:", "def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...", "def get_mode(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? (.*?) .*? .*? .*? .*? .*? .*? 
\\r\\n' \n mode = re.findall(pattern,summary).pop()\n return mode", "def current_swing_mode(self):\n return self._current_swing_mode", "def fan_mode(self) -> str | None:\n return self._current_fan_mode", "def swing_mode(self) -> str | None:\n return self._current_swing_mode", "def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode", "def eval_mode(self):\n return self._eval_mode", "def mode(self):\n return self._vdma.readchannel.mode", "def get_double_mode(self):\r\n msg = struct.pack('>2B', 56, 1)\r\n response = self.query(msg)\r\n if response[1] == 254:\r\n return 'Subtractive mode selected.'\r\n elif response[1] == 1:\r\n return 'Additive mode selected.'\r\n else:\r\n raise ValueError('Mode not recognised.')", "def device_mode(self) -> str:\n raw_mode = self._device_info[\"SensorMode\"]\n if raw_mode not in DEVICE_MODE_MAP:\n LOGGER.debug(\"Unknown device mode value: %s\", raw_mode)\n return DEVICE_MODE_UNKNOWN\n return DEVICE_MODE_MAP[raw_mode]", "def auto_mode(self):\n return self._auto_mode", "def common_mode(self):\n return self._common_mode", "def common_mode(self):\n return self._common_mode", "def current_option(self) -> str:\n return self.coordinator.data.settings.lamp_mode.name.lower()", "def getmodebase(mode):\r\n return ImageMode().getmode(mode).basemode", "def _get_vc_mode(self):\n return self.__vc_mode", "def operating_mode(self):\n return self._read(MX_OPERATING_MODE)", "def get_mode(guild_id: int):\n key = _mode_key(guild_id)\n if key not in db:\n return fixtures.chat\n return db[key]", "def tflite_mode(self):\n return getattr(self, \"_tflite_mode\", False)", "def preset_mode(self):\n return self._preset_mode", "def get_current_app_mode() -> ThemeMode:\n theme: ApplicationTheme = themes[0]\n\n # Change current mode.\n path = theme.path\n try:\n with open(path, 'r', encoding='utf-8') as f:\n pass\n except FileNotFoundError:\n path = theme.windows_path\n try:\n with open(path, 'r', encoding='utf-8') as f:\n settings = json.load(f)\n return theme.modes[settings[theme.keys] == theme.light_name]\n except Exception:\n return ThemeMode.light", "def mode(self) -> Union[int, float, str,\n List[int], List[float], List[str]]:\n mode = self._data.mode()\n if len(mode) > 1:\n return mode.to_list()\n else:\n return mode[0]", "def mode(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.MODE, self._SW_VER)]", "def get_mode(self, excluding=None):\r\n if excluding is None:\r\n excluding = []\r\n m = theano.config.mode\r\n if m == 'FAST_COMPILE':\r\n mode = theano.compile.mode.get_mode('FAST_RUN')\r\n else:\r\n mode = theano.compile.mode.get_default_mode()\r\n if excluding:\r\n return mode.excluding(*excluding)\r\n else:\r\n return mode", "def getUserMode(self, username):\r\n return self.getUser(username).mode", "def get_modes(self):\n return self.circuit.get_modes()", "def get_pump_mode(self):\n return self.__pump_mode", "def get_color_mode(self):\n mode=lib.is_SetColorMode(self.hcam,0x8000)\n return self._color_modes_inv.get(mode,mode)", "def default_mode(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"default_mode\")", "def get_current_boot_mode(self):\n boot_mode = self._get_bios_setting('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n\n return boot_mode.upper()", "def getPointMode(self):\n l = [point.mode for point in self.points]\n if l.count(l[0]) == len(l):\n 
return l[0]\n else:\n raise ValueError(\"The modes of the points must be the same otherwise it makes no sense.\")", "def get_mode_parameter(mode):\n if mode == 'job':\n return 'cli'\n elif mode == 'serve':\n return 'serving'\n else:\n return mode", "def get_cache_mode(self) -> CacheModeStr:\n return CACHE_MODES.inverse[self.cacheMode()]", "def get_mode_name(self, i):\n for mode in self.modes:\n if mode['id'] == i:\n return mode['name']\n return 'Unknown Game Mode'", "def get_fan_mode(self):\n return self.parent._fan_auto_mode", "def get_preferred_mode(self):\n ret = self._transfer(TVGetModes())\n return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None", "def getGatingMode(self, channel, unitCode=0):\n resp = self.XAPCommand('GMODE', channel, unitCode=unitCode)\n return int(resp)", "def getVKBEditMode(self):\r\n return eval(self.phone.sx('(send (get-text-editor-manager) get-edit-mode)', convertToString=True).title())", "def get_autofeed_mode(self):\n mode = self.parent.printer.get_autofeed()\n if mode is not None:\n mode = mode[0]\n return mode" ]
[ "0.91673493", "0.9092009", "0.9040086", "0.89945453", "0.8909406", "0.8863383", "0.8853852", "0.8764516", "0.86753494", "0.8675129", "0.8675129", "0.8675129", "0.8633829", "0.8633829", "0.8633829", "0.8588385", "0.8558855", "0.85565245", "0.8501583", "0.84935856", "0.84677076", "0.84677076", "0.8388347", "0.8385099", "0.8357531", "0.8345505", "0.83437854", "0.81738037", "0.81738037", "0.8160567", "0.81484634", "0.7974692", "0.77744675", "0.77395767", "0.77395767", "0.77194536", "0.76896", "0.76835924", "0.76817477", "0.7639258", "0.7579958", "0.75772285", "0.7537069", "0.7537069", "0.7527684", "0.7513529", "0.7458285", "0.741212", "0.73942065", "0.738503", "0.7352512", "0.73308814", "0.7327231", "0.73241186", "0.7322153", "0.72989666", "0.7265211", "0.72616124", "0.7252234", "0.72489244", "0.7241861", "0.72290903", "0.7228927", "0.7225946", "0.7209551", "0.7202137", "0.719174", "0.7186986", "0.71763325", "0.7158981", "0.7148818", "0.7148091", "0.71418756", "0.71418756", "0.71331847", "0.71308875", "0.711815", "0.70926195", "0.70818645", "0.7070118", "0.70649785", "0.70584184", "0.7057396", "0.7029081", "0.7026534", "0.70013976", "0.6994495", "0.691608", "0.68972695", "0.6891767", "0.68827", "0.6842191", "0.6836792", "0.68285984", "0.6825704", "0.68247616", "0.68117565", "0.6808915", "0.6788368", "0.67843956" ]
0.8524049
18
Gets the current ml_task. If "auto" it is determined
def _get_ml_task(self): self._validate_ml_task() if self.ml_task == "auto": classes_number = self.n_classes if classes_number == 2: self._estimator_type = "classifier" # for sk-learn api return BINARY_CLASSIFICATION elif classes_number <= 20: self._estimator_type = "classifier" # for sk-learn api return MULTICLASS_CLASSIFICATION else: self._estimator_type = "regressor" # for sk-learn api return REGRESSION else: return deepcopy(self.ml_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_current_task():\r\n return current_task", "def _get_current_task():\r\n return current_task", "def get_current_task(self):\n return self.get_current_step().get_last_task()", "def task(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"task\")", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def getCurrentTask(self):\n if not self.task.isEmpty():\n return str(self.task.peek())\n else:\n return 'You win! Fly around with your new Iron Man suit!'", "def task(self) -> str:\n return self._task", "def task(self) -> base_model.BaseTask:\n return self._task", "def current_workflow():\n try:\n return current_worker_pool.workflow\n except AttributeError:\n return None", "def target(self):\n return self._task_target", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def get_target(self):\n task = self.task.get_task(self.task_id)\n if 'name' in task:\n return str(task['name'])\n return str(task)", "def current_task() -> Task:\n try:\n return _current_task.get()\n except LookupError:\n raise RuntimeError(\"Can only be called from an async function.\")", "def get_task_fun(self):\n app_config = self.integration.service.app_config\n try:\n return app_config.periodic_tasks[self.name]\n except KeyError:\n pass", "def get_target_simple(self):\n task = self.task.get_task(self.task_id)\n return str(task['name'])", "def current_task(self):\n try:\n return self.active_item(remove=False)\n except queue.Empty:\n return None", "def get_task(self, profile):\n task = None\n if self._value.has_option(profile, 'task'):\n task = self._value.get(profile, 'task')\n\n self.logger.info(\"%s is selected as task\" % task)\n return task", "def get_previous_task():\n return _tasks[0] if len(_tasks) != 0 else None", "def get_current_task_name(self):\r\n task_available = False\r\n while not task_available:\r\n try:\r\n with open('current_taskname.txt', 'r') as f:\r\n self.task_name = f.read()\r\n task_available = True\r\n except:\r\n print('No available task yet...')\r\n time.sleep(1)\r\n pass\r\n return self.task_name", "def task_id(self):\n return self._mpis.task_id", "def get_task(self):\n return self.queue.get()", "def task_type(self):\n pass", "def task_name(self):\n pass", "def get_input_task(self, name='0'):\n port = self.get_input(name).other\n if port is None:\n return None\n return port.task", "def task_definition(self):\n return self._task_definition", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. \" % task)", "def __curtask__(self):\n task = current_task(loop=self._loop)\n if not task:\n raise RuntimeError('No task is currently running')\n\n if not hasattr(task, '_locals'):\n task._locals = local_storage()\n return task._locals", "def getTaskName(self):\n return self._taskName", "def get_task(self, name):\n res = Task()\n self.GetTask(name, res)\n return res", "def get_last_task(self):\n return self.get_task_by_index(-1)", "def get_current_element(self, ):\n n = jbscene.get_current_scene_node()\n if not n:\n return None\n tfid = cmds.getAttr(\"%s.taskfile_id\" % n)\n try:\n tf = djadapter.taskfiles.get(pk=tfid)\n return tf.task.element\n except djadapter.models.TaskFile.DoesNotExist:\n raise djadapter.models.TaskFile.DoesNotExist(\"Could not find the taskfile that was set on the scene node. 
Id was %s\" % tfid)", "def get(self) -> Task: # pragma: no cover\n raise NotImplementedError", "def get_current_param(self, t=None):\n if self.current_context is None:\n raise Exception(\"The MAB game is not started.\")\n \n return self.get_param(self.current_context)", "def task_name(self) -> str:\n return self._task_name", "def get_task(self, u_name):\n raise NotImplementedError()", "def get_task_type(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskType', self.handle)", "def helper_get_task_or_default(self):\n task_id = self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"taskid\")\n alt_task_store_name = self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"alt_task_store_name\")\n used_task_store = self.task_store\n # don't want to use sth like globals.get(alt_task_store) so that only approved stores can be used\n if alt_task_store_name == \"task_store_trash\":\n used_task_store = self.task_store_trash\n try:\n task = used_task_store.store_dict_id[task_id]\n except Exception as exc:\n # task_id is either None or it is not in store_dict_id\n util.dbgprint(\"exception in helper_get_task_or_default, semi-expected {}\".format(str(exc)))\n self.error_msg_queue_list.append(\"Couldn't retrieve requested note.\")\n return False, 0, 0, self.page_list_notes(no_history=True)\n return True, task_id, task, \"\"", "def task_id(self):\n return self._task_id", "def task_id(self):\n return self._task_id", "def task_id(self):\n return self._task_id", "def task_id(self):\n return self._task_id", "def execution_target(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"execution_target\")", "def getcurrent():\n\n curr = coroutine.getcurrent()\n if curr is _main_coroutine:\n return _main_tasklet\n else:\n return curr", "def current_scheduler() -> \"Scheduler\":\n return current_task().scheduler", "def getmain():\n return _main_tasklet", "def current_mission(self):\n try:\n return self.missions[self.status.mission_task_list[0]]\n except KeyError:\n return None", "def get_task_role(self):\n self.fix_arguments()\n if self.task_role is not None:\n comp = self.task_role\n elif self._model_instance:\n #fetch the default task role for the entire model\n #this can raise an exception if there isn't a\n #default task role defined for the model\n comp = self._model_instance.get_task_role()\n else:\n raise ConfigException(\"Can't find a task role for task {}\".format(self.name))\n return comp", "def get_output_task(self, name='0'):\n port = self.get_output(name).other\n if port is None:\n return None\n return port.task", "def __getattribute__(self, name):\n if name in ('__setattr__', '__getattr__', '__delattr__', '_loop', '__curtask__'):\n return object.__getattribute__(self, name)\n return getattr(self.__curtask__, name)", "def get_task(self, id):\n raise NotImplementedError()", "def task_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"task_type\")", "def get_task(self) -> Generator[Tuple[Optional[schema.Task], Optional[int]], None, None]:\n raise NotImplemented", "def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')", "def get_task(option_set):\n return option_set & TASK_MASK", "def get_task_token(decision):\n try:\n return decision[\"taskToken\"]\n except KeyError:\n # No taskToken returned\n return None", "def current_job(self):\n assert(ExecutorThread.executor_object is not 
None)\n return self.__job", "def get_lr_scheduler(self) -> Optional[pytorch.LRScheduler]:\n return self.lr_scheduler", "def task(self):\n return import_path_to_callable(self.func)", "def get_task_index(self):\n return self.task_index", "def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)", "def getTask(self, name):\n for t in self.tasks:\n if isinstance(name, str):\n if t.name == name:\n return t\n else:\n if t.__class__ is name:\n return t\n return None", "def setup_next_task(self, reset=False):\r\n current_task_state = None\r\n if len(self.task_states) > self.current_task_number:\r\n current_task_state = self.task_states[self.current_task_number]\r\n\r\n self.current_task_xml = self.task_xml[self.current_task_number]\r\n\r\n if self.current_task_number > 0:\r\n self.ready_to_reset = self.check_allow_reset()\r\n if self.ready_to_reset:\r\n self.current_task_number = self.current_task_number - 1\r\n\r\n current_task_type = self.get_tag_name(self.current_task_xml)\r\n\r\n children = self.child_modules()\r\n child_task_module = children['modules'][current_task_type]\r\n\r\n self.current_task_descriptor = children['descriptors'][current_task_type](self.system)\r\n\r\n # This is the xml object created from the xml definition of the current task\r\n etree_xml = etree.fromstring(self.current_task_xml)\r\n\r\n # This sends the etree_xml object through the descriptor module of the current task, and\r\n # returns the xml parsed by the descriptor\r\n self.current_task_parsed_xml = self.current_task_descriptor.definition_from_xml(etree_xml, self.system)\r\n if current_task_state is None and self.current_task_number == 0:\r\n self.current_task = child_task_module(self.system, self.location,\r\n self.current_task_parsed_xml, self.current_task_descriptor,\r\n self.static_data)\r\n self.task_states.append(self.current_task.get_instance_state())\r\n self.state = self.ASSESSING\r\n elif current_task_state is None and self.current_task_number > 0:\r\n last_response_data = self.get_last_response(self.current_task_number - 1)\r\n last_response = last_response_data['response']\r\n current_task_state = json.dumps({\r\n 'child_state': self.ASSESSING,\r\n 'version': self.STATE_VERSION,\r\n 'max_score': self._max_score,\r\n 'child_attempts': 0,\r\n 'child_created': True,\r\n 'child_history': [{'answer': last_response}],\r\n })\r\n self.current_task = child_task_module(self.system, self.location,\r\n self.current_task_parsed_xml, self.current_task_descriptor,\r\n self.static_data,\r\n instance_state=current_task_state)\r\n self.task_states.append(self.current_task.get_instance_state())\r\n self.state = self.ASSESSING\r\n else:\r\n if self.current_task_number > 0 and not reset:\r\n current_task_state = self.overwrite_state(current_task_state)\r\n self.current_task = child_task_module(self.system, self.location,\r\n self.current_task_parsed_xml, self.current_task_descriptor,\r\n self.static_data,\r\n instance_state=current_task_state)\r\n\r\n return True", "def get_task(self, id=None, name=None):\n query = \"SELECT * FROM tangerine WHERE \"\n if id: query += \"id='\"+str(id)+\"'\"\n elif name: query += \"name='\"+name+\"' AND parent_job IS NULL\"\n else: return None\n \n cur = self.conn.cursor()\n cur.execute(query + \";\")\n self.conn.commit()\n task = cur.fetchone()\n \n if task:\n return Task(self.columns, task);\n else:\n return None", "def task(self, name):\n with self.db_lock:\n 
return self.rcon.hget(self.task_key, name)", "def config(self):\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config", "def task_id(self) -> str:\n return self.get_from_redis(\"task_id\")", "def run(self):\n task_func = getattr(self, self.task_data.get('task_type'))\n task_obj = task_func()\n return task_obj", "def __getattribute__(self, name):\n if name in ('__setattr__', '__getattr__', '__delattr__', '_loop', '__curtask__'):\n return object.__getattribute__(self, name)\n\n if self._loop.is_running():\n return getattr(self.__curtask__, name)\n return getattr(tlocals, name)", "def check_task(self): \n return self.buffer[0]", "def get_target(self, ):\n return self.get_parameter('target')", "def Task(self):\n return self.create_task_cls()", "def make_task(self):\n return Task()", "def get_task_role(self):\n if self.default_task_role is None and self.delegate is None:\n raise ConfigException(\"No default task role defined on the config model\")\n\n if self.namespace_model_instance is None:\n raise ConfigException(\"ConfigModel instance can't get a default task role from a Namespace model reference without an instance of that model\")\n \n comp_ref = self.namespace_model_instance.get_inst_ref(self.default_task_role)\n comp_ref.fix_arguments()\n return comp_ref.value()", "def enumerate_new_task(self):\n if self.repeat_index < CONFIG.repeat_times:\n return Task(\n self.name, self.device, self.python_interpreter, self.tests, self.repeat_index + 1)\n if self.python_interpreter != CONFIG.python_interpreters[-1]:\n i = CONFIG.python_interpreters.index(self.python_interpreter)\n return Task(self.name, self.device, CONFIG.python_interpreters[i+1], self.tests, 1)\n return None", "def get_task(self, key: str) -> Task:\n raise NotImplementedError", "def first(self) -> Task:\n return self._tasks[0]", "def get_task(self,\n task_label=None,\n notebook_cell_text=None,\n print_return=True):\n\n self._print('Getting task {} ...'.format(task_label))\n\n if task_label:\n task = {task_label: self._tasks[task_label]}\n\n elif notebook_cell_text:\n task = self._load_task_from_notebook_cell(notebook_cell_text)\n\n else:\n raise ValueError(\n 'Get an existing task by querying for its ID or register a '\n 'task from a notebook cell.')\n\n if print_return: # For communicating with JavaScript\n print(dumps(task))\n return task", "def current_worker():\n try:\n return worker_thread_data.worker\n except AttributeError:\n return None", "def get_thread(self):\n return self.threads[self.thread_id]", "def get_parent_task(self) -> Optional[\"TaskNode\"]:\n triples = self.agent_memory.get_triples(subj=self.memid, pred_text=\"_has_parent_task\")\n if len(triples) == 0:\n return None\n elif len(triples) == 1:\n _, _, parent_memid = triples[0]\n return TaskNode(self.agent_memory, parent_memid)\n else:\n raise AssertionError(\"Task {} has multiple parents: {}\".format(self.memid, triples))", "def GetCurrent():\n global ENV\n return ENV[threading.current_thread().ident]", "def get_task_optimizer(self) -> torch.optim.Optimizer:\n pass", "def get_worker(self):\n return self.worker", "def detect_task_type(path):\n # distinguishing \"delay-response\" task or \"multi-target-licking\" task\n mat = spio.loadmat(path.as_posix(), squeeze_me=True, struct_as_record=False)\n GUI_fields = set(mat['SessionData'].SettingsFile.GUI._fieldnames)\n\n if ({'X_center', 'Y_center', 
'Z_center'}.issubset(GUI_fields)\n and not {'SamplePeriod', 'DelayPeriod'}.issubset(GUI_fields)):\n task_type = 'multi-target-licking'\n else:\n task_type = 'delay-response'\n\n return task_type", "def _do_power_action(cls, task):\n if task is None:\n return\n result = vc_soap_util.get_task_state(task)\n return result", "def _get_current_device() -> Device | None:\n if task_runtime.has_environment():\n return task_runtime.get_current_devices()[0]\n return None", "def script_execution_get() -> str | None:\n if (data := script_execution_cv.get()) is None:\n return None\n return data.script_execution", "def get_current_task(self) -> Tuple[TaskId, any]:\n with self.local_redis.pipeline() as pipe:\n while True:\n try:\n # optimistic locking: https://realpython.com/python-redis/#using-redis-py-redis-in-python\n pipe.watch(TASK_ID_KEY)\n task_id: TaskId = deserialize(retry_get(pipe, TASK_ID_KEY))\n if task_id == self.cached_task_id:\n # print(f'[worker] Returning cached task {task_id}')\n break\n pipe.multi()\n pipe.get(TASK_DATA_KEY)\n # print(f'[worker] Getting new task {task_id}. Cached task was {self.cached_task_id}')\n self.cached_task_id, self.cached_task_data = task_id, deserialize(pipe.execute()[0])\n break\n except redis.WatchError:\n continue\n return self.cached_task_id, self.cached_task_data", "def auto(self):\n return self._auto", "def get_current_sm(self):\n if self.behaviour.initting:\n raise StateMachineNotReadyError\n \n return self.behaviour.current_sm", "def _get_task(self, task_id):\n if not task_id:\n return None\n task = objects.Transaction.get_by_uid(task_id, fail_if_not_found=False)\n if task and task.cluster_id == self.cluster.id:\n return task\n return False", "def current_operation(self):\n return self._current_operation", "def current_operation(self):\n return self._current_operation", "def __str__(self):\n return self.task", "def task_file(self) -> str:\n return self._task_file", "def get_taskToken(activity_task):\n try:\n return activity_task[\"taskToken\"]\n except KeyError:\n # No taskToken returned\n return None" ]
[ "0.77952605", "0.77952605", "0.75856113", "0.7273964", "0.70522803", "0.70522803", "0.70522803", "0.70522803", "0.70522803", "0.6897995", "0.6803446", "0.6802864", "0.67456496", "0.6690138", "0.6611777", "0.6603375", "0.6532655", "0.6522994", "0.64612824", "0.6443876", "0.63672", "0.62963295", "0.6276757", "0.62206924", "0.61999714", "0.61712605", "0.611179", "0.60965604", "0.60870284", "0.6066629", "0.6066371", "0.6029379", "0.60224134", "0.6020946", "0.60052884", "0.59572875", "0.5935212", "0.5935159", "0.59261835", "0.5915396", "0.5878335", "0.5875346", "0.5875346", "0.5875346", "0.5875346", "0.58698374", "0.586542", "0.58594567", "0.585409", "0.5828881", "0.582765", "0.5812148", "0.5798582", "0.578387", "0.5771615", "0.576875", "0.57467127", "0.57315993", "0.5715684", "0.56764305", "0.5672845", "0.565079", "0.5647819", "0.5632811", "0.56326616", "0.56168306", "0.5611", "0.5609201", "0.56045806", "0.55966884", "0.55910105", "0.55875325", "0.55863345", "0.557214", "0.5555924", "0.55513304", "0.5531931", "0.5506917", "0.550133", "0.5489296", "0.54889554", "0.5483513", "0.54742485", "0.5466719", "0.54577076", "0.54555553", "0.5450597", "0.5427471", "0.5421992", "0.5421907", "0.54181206", "0.54178065", "0.54153675", "0.54128414", "0.5408813", "0.54060215", "0.54060215", "0.5403351", "0.5402888", "0.54021955" ]
0.7787898
2
Gets the current results_path
def _get_results_path(self): # if we already have the results path set, please return it if self._results_path is not None: return self._results_path self._validate_results_path() path = self.results_path if path is None: for i in range(1, 10001): name = f"AutoML_{i}" if not os.path.exists(name): self.create_dir(name) self._results_path = name return name # If it got here, could not create, raise expection raise AutoMLException("Cannot create directory for AutoML results") elif os.path.exists(self.results_path) and os.path.exists( os.path.join(self.results_path, "params.json") ): # AutoML already loaded, return path self._results_path = path return path # Dir does not exist, create it elif not os.path.exists(path): self.create_dir(path) self._results_path = path return path # Dir exists and is empty, use it elif os.path.exists(path) and not len(os.listdir(path)): self._results_path = path return path elif os.path.exists(path) and len(os.listdir(path)): raise AutoMLException( f"Cannot set directory for AutoML. Directory '{path}' is not empty." ) raise AutoMLException("Cannot set directory for AutoML results")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_results(self):\n\n return self._local_results_path", "def get_result_path(self):\n return logPath", "def index_path(self):\n\t\treturn os.path.normpath(self.output + \"/\" + self.resultset_index)", "def remote_results(self):\n\n return self._remote_results_path", "def set_results_path(self):\n\n self.results_input.delete(0, END)\n path = set_path()\n self.results_input.insert(0, path)", "def saved_query_path(self) -> str:\n return self.join_path(self.rest_api_saved_query.arg)", "def get_resultfile(self):\r\n return self._resultfile", "def ml_predict_results_path(self) -> str:\n return join(self.machine_learning_path, 'results')", "def get_relative_path(self):\n return urlparse(self.browser.current_url).path", "def dir_results(assignment, user):\n return os.path.join(repository, assignment, user, 'results')", "def output_path(self):\n\n output_path = stringify(self._output_path)\n if output_path is None:\n with current_context() as ctx:\n output_path_relative = stringify(self.output_path_relative)\n if output_path_relative is not None:\n output_path = join_path(ctx.paths.output, output_path_relative)\n else:\n output_path = ctx.current.project.get_output_path(self.executor.output_type)\n return output_path", "def _current_script_dir(self):\n if self._script_dir:\n return self._script_dir[-1]\n else:\n return None", "def path( self ) :\n\n return( self.__path )", "def get_path(self):\n return self.path", "def getPath(self):\r\n\t\treturn self.pathToGoal", "def path(self):\n return self.path", "def getCurrentFilePath(self):\n return os.path.abspath(self.filePath)", "def logpath(self):\n return self.outpath", "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "def current_directory (self):\r\n pass", "def __get_path(self):\n return self.path", "def _getResultsFileName(self, toilPath):\n return os.path.join(toilPath, \"results.txt\")", "def get_full_path(self):\n return self.path_display", "def get_path(self):\n\n return self._path", "def currentPreviewPath(self):\n logger.debug(\"Func: currentPreviewPath/getter\")\n if self._currentSceneInfo[\"SubProject\"] is not \"None\":\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"Name\"])\n else:\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"SubProject\"], self._currentSceneInfo[\"Name\"])\n return path\n # if os.path.isdir(path):\n # return path\n # else:\n # return \"\"", "def currentScenePath(self):\n logger.debug(\"Func: currentBaseScenePath/getter\")\n\n return os.path.join(self.projectDir, self._currentSceneInfo[\"Versions\"][self.currentVersionIndex-1][\"RelativePath\"])", "def curdir(self):\n return self.var('getcwd()')", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "def GetPath(self):\r\n\r\n return self.directory", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def path(self):\n return self._path", "def _fetch_results_dir(self, backend=None, 
results_dir=None):\n if backend is None and results_dir is not None:\n return results_dir\n elif backend is not None and results_dir is None:\n return Config().get_results_dir(backend)\n elif backend is None and results_dir is None:\n return self.nominal_results_dir\n else:\n raise ResultsAttributeError('Invalid combination of attributes!')", "def path(self):\n return self._selenium.current_url.replace(\n 'http://{}'.format(self._address), '')", "def execution_path(self, filename):\n return os.path.join(os.path.dirname(inspect.getfile(sys._getframe(0))), filename)", "def GetCurrentDir(self) -> str:\n ...", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def path(self) -> str:\n return pulumi.get(self, \"path\")", "def get_file_save_path(self):\n return self.out", "def localPath(self):\n return self.home", "def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path", "def return_output_path(self):\n # Return the path of the output file\n return os.path.join(self._output_file_location, self._output_filename)", "def current():\n result = run(\"ls -ld %(current_dir)s | awk '{print $11}'\" % env)\n return result.split('/')[-1]", "def get_current_url():\n return current_url", "def outpath(self):\n return None", "def current_url(self):\n return self.browser.current_url", "def path(self) :\n return self.m_path", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def path(self) -> str:\n return self._path", "def path(self):\n return self.file_path()", "def work_dir(self):\n return self._work_dir", "def real_path(self):\n\t\treturn self.args[0]", "def get_path(self):\n return self.path", "def path(self):\n return self._dir_entry.path", "def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"", "def path(self):\n return pjoin(self._dj._jobsdir, self._status, self.full_name())", "def get_current_path(self):\r\n path_2 = self.path.replace('\\\\', '/')\r\n return self.copy_to_clipboard(path_2)", "def workspace_path(self):\n return self._path_temp", "def rel_path(self):\n return \"{}/{}\".format(Path(self.dir_path).basename, self.index_file)", "def getContextPath(self):\n pass", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)", "def _get_result_paths(self,data):\n return {'output':ResultPath(Path=data['output_path'],IsWritten=True)}", "def get_scanrecpath(self):\n start_key = min(self.obsinfos)\n scanrecname = self.obsinfos[start_key].obsfoldername(\n source_name=self.scanrecparms['pointing'])\n scanrecpath = os.path.join(self.scanpath, scanrecname)\n return scanrecpath", "def get_path(self):\n return self.sync_path", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def get_document_path(self):\n return pathlib.Path(urlparse(self.get_document().uri).path)", "def get_processed_path(self):\n location = self.get_storage().location\n return self.get_processed_key_name()[len(location):]", "def get_current_file_uri(self): # real signature unknown; restored from __doc__\n return \"\"", "def get_url(self):\n return self.base_driver.current_url", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def output_path(self):\n return self._event.output_path", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def 
build_path(self):\r\n return self.selmgr.select_path()", "def output_file_path(self):\n return self.__output_file_path", "def get_report_path(self):\n report_path = os.path.join(logPath, \"report.html\")\n return report_path", "def _get_result_paths(self, data):\r\n # access data through self.Parameters so we know it's been cast\r\n # to a FilePath\r\n wd = self.WorkingDir\r\n od = self.Parameters['-o'].Value\r\n result = {}\r\n result['confusion_matrix'] = ResultPath(\r\n Path=join(od, 'confusion_matrix.txt'), IsWritten=True)\r\n result['cv_probabilities'] = ResultPath(\r\n Path=join(od, 'cv_probabilities.txt'), IsWritten=True)\r\n result['features'] = ResultPath(\r\n Path=join(od,\r\n 'feature_importance_scores.txt'),\r\n IsWritten=True)\r\n result['mislabeling'] = ResultPath(\r\n Path=join(od, 'mislabeling.txt'), IsWritten=True)\r\n result['summary'] = ResultPath(\r\n Path=join(od, 'summary.txt'), IsWritten=True)\r\n return result", "def getReturnPath(self):\r\n return self.msg[\"Return-Path\"]", "def get_location(self):\n return os.path.dirname(self.filename)", "def get_url(self):\n return self.driver.current_url", "def get_current_directory_uri(self): # real signature unknown; restored from __doc__\n return \"\"", "def rst_path(self):\n return Path(self.dir_path, self.index_file + \".rst\").abspath", "def working_dir(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"working_dir\")", "def get_processed_path(self):\n\n return self.processed_img_path", "def getIndexFilePath(self):\n return self.index_file_path", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def artiq_results_path(experiment: Optional[str] = None) -> str:\n\n path = os.path.join(shared_area_path(), \"artiqResults\")\n\n if experiment is None:\n try:\n experiment = os.environ[\"OITG_EXPERIMENT\"]\n except KeyError:\n raise Exception(\n \"No experiment supplied, and no OITG_EXPERIMENT environment key\")\n\n return os.path.join(path, experiment)", "def get_results_filepath(self, name, source=None):\n for source, parser in mwcp.iter_parsers(name, source=source):\n file_name = parser.name + FILE_EXTENSION\n # Use hardcoded testcase directory if set.\n testcase_dir = mwcp.config.get(\"TESTCASE_DIR\")\n if testcase_dir:\n return os.path.join(testcase_dir, file_name)\n\n if source.is_pkg:\n # Dynamically pull based on parser's top level module.\n test_dir = pkg_resources.resource_filename(source.path, \"tests\")\n else:\n # If source is a directory, assume there is a \"tests\" folder within it.\n test_dir = os.path.join(source.path, \"tests\")\n\n return os.path.normpath(os.path.join(test_dir, file_name))\n\n raise ValueError(\"Invalid parser: {}\".format(name))" ]
[ "0.7681095", "0.7608964", "0.72525233", "0.71424973", "0.65749073", "0.65520835", "0.6454215", "0.6348574", "0.63169134", "0.6294443", "0.6256191", "0.6253854", "0.62473315", "0.6234381", "0.61912316", "0.61699665", "0.6166557", "0.6160031", "0.61576474", "0.6157143", "0.6109445", "0.6082885", "0.6077841", "0.60770625", "0.60767025", "0.6070803", "0.6063543", "0.60629946", "0.60629946", "0.6054768", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.60322315", "0.6016464", "0.59860563", "0.5980412", "0.597601", "0.59667975", "0.59667975", "0.5955385", "0.595229", "0.5945257", "0.5938569", "0.5937692", "0.59371257", "0.5919377", "0.59190685", "0.5918746", "0.5918377", "0.5918377", "0.5918377", "0.5918377", "0.5914533", "0.5901652", "0.58929074", "0.5866585", "0.58614326", "0.58598137", "0.5845588", "0.58414453", "0.5839099", "0.5837346", "0.5836327", "0.5836153", "0.5812982", "0.58109134", "0.5802858", "0.58003783", "0.580032", "0.5788356", "0.5783889", "0.5778255", "0.5767675", "0.5753989", "0.5753467", "0.57509077", "0.5745692", "0.5742575", "0.5741357", "0.57376003", "0.573496", "0.5728419", "0.5723237", "0.57198757", "0.57195467", "0.57138896", "0.57004815", "0.5699175", "0.56990534", "0.56988436", "0.5698096" ]
0.71246856
4
Gets the current total_time_limit
def _get_total_time_limit(self): self._validate_total_time_limit() if self._get_mode() == "Optuna": return None # there no training limit for model in the Optuna mode # just train and be happy with super models :) return deepcopy(self.total_time_limit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_limit(self) -> float:\n return self._time_limit", "def timelimit(self):\n return self._timelimit", "def get_limit_per_second(self):\n pass", "def time_limit(self):\n return 2503", "def get_current_timeout(cls):\n return cls.current().get_timeout()", "def timelimit_hard(self):\n return self._timelimit_hard", "def max_timeout(self):\n return self._max_timeout", "def secondsTotal(self)->int:\n return self._lic.params['maxSessionTime'].value", "def get_timeout(self) -> int:", "def time_limit(self):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n return self.time_limit_as_of_update(\n all_time_limit_updates.latest('id'))", "def normalized_total_time(p, max_time=3600000):\n if \"cdgp.wasTimeout\" in p and p[\"cdgp.wasTimeout\"] == \"true\":\n v = 3600000\n else:\n v = int(float(p[\"result.totalTimeSystem\"]))\n return max_time if v > max_time else v", "def max_time(self):\n return self._max_time", "def calls_remaining(self) -> int:\n return self.usage_limit - self.current_usage", "def get_request_limit(self, access_token):\n url = \"{0}/rate_limit?access_token={1}\"\n response = requests.get(url.format(self.ROOT_API_URL, access_token))\n data = response.json()\n return data['resources']['core'].get(\"remaining\")", "def runtime(self):\n return self.stop_time - self.start_time", "def calculate_timeout(self):\n return self.total_estimated_words() / self.minimum_wpm * 60", "def gettimeout(self):\r\n return self._timeout", "def _get_model_time_limit(self):\n self._validate_model_time_limit()\n return deepcopy(self.model_time_limit)", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def time_remaining(self):\n with self._lock:\n deadline = self._expiration_manager.deadline()\n return max(0.0, deadline - time.time())", "def get_limit(self):\n return self._limit", "def get_limit(self):\n return self._limit", "def remaining_requests(self):\n try:\n return self._get_limit('Remaining')\n except ValueError:\n logging.error(\n \"Unable to gather limit statistics until log() has been called. 
Returning -1\")\n return -1", "def get_limit(self):\n return self.limit", "def getTotalTime(self):\n with self.lock:\n if self.tend == 0:\n total = -1\n else:\n total = self.tend - self.tstart\n return total", "def Remaining(self):\n if self._timeout is None:\n return None\n\n # Get start time on first calculation\n if self._start_time is None:\n self._start_time = self._time_fn()\n\n # Calculate remaining time\n remaining_timeout = self._start_time + self._timeout - self._time_fn()\n\n if not self._allow_negative:\n # Ensure timeout is always >= 0\n return max(0.0, remaining_timeout)\n\n return remaining_timeout", "def get_timeout(self):\n return self.timeout", "def get_time(self):\n return self._total_time", "def calculate_cpu_sleep_interval(cpulimit, percentused, elapsedtime):\n # Debug: Used to calculate averages\n #global totaltime, rawcpu, appstart\n\n # Return 0 if elapsedtime is non-positive\n if elapsedtime <= 0:\n return 0\n \n # Calculate Stoptime\n # Mathematically Derived from:\n # (PercentUsed * TotalTime) / ( TotalTime + StopTime) = CPULimit\n stoptime = max(((percentused * elapsedtime) / cpulimit) - elapsedtime , 0)\n\n # Print debug info\n #rawcpu += percentused*elapsedtime\n #totaltime = time.time() - appstart\n #print totaltime , \",\" , (rawcpu/totaltime) , \",\" ,elapsedtime , \",\" ,percentused\n #print percentused, elapsedtime\n #print \"Stopping: \", stoptime\n\n # Return amount of time to sleep for\n return stoptime", "def getRuntime(self):\n\n return self.endTime - self.startTime", "def _get_next_limit(self):\n return self.__quota", "def QueueStatResponseTimeOut(self):\n\t\treturn self._get_attribute('queueStatResponseTimeOut')", "def _get_next_timeout_interval(self):\n if len(self.timer_tasks) == 0:\n return sys.maxint\n else:\n next_timeout_interval = self.timer_tasks[0][0] - time.time()\n return next_timeout_interval", "def get_total_time(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetTotalTime', self.handle)", "def get_current_time():\n\n now = dt.datetime.now()\n total_time = (now.hour * 3600) + (now.minute * 60) + (now.second)\n return total_time", "def remaining_bytes(self):\n try:\n return self._get_limit('remaining')\n except ValueError:\n logging.error(\n \"Unable to gather limit statistics until log() has been called. 
Returning -1\")\n return -1", "def pc_work_time_total(self):\n return _spacegrant_swig.udp_debug_sptr_pc_work_time_total(self)", "def MaxWaitTime(self):\r\n\t\treturn self._get_attribute('maxWaitTime')", "def size_limit(self):\n\t\treturn self._size_limit", "def tcp_speed_limit(self):\r\n return self._arm.tcp_speed_limit", "def timeout_time(self):\n if self.start_time is None:\n return None\n return self.start_time + self.timeout", "def max_delay_time(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"max_delay_time\")", "def elapsed_time(self) -> float:\n current_time = datetime.utcnow()\n start = self.start_time or current_time\n end = self.end_time or current_time\n return (end - start).total_seconds()", "def queued_time(self):\r\n return (self.node_monitor_launch_time - self.node_monitor_submit_time)", "def calls_used(self) -> float:\n return self.current_usage / self.usage_limit", "def remaining(self):\n if not self.enabled:\n return None\n duration = self.timeout - self.elapsed\n if self.timed_out: # check timed_out after duration for real-time correctness\n return 0\n return duration", "def last_used_tcp_speed(self):\r\n return self._arm.last_used_tcp_speed", "def remaining_ms():", "def FlowAggregatedStatResponseTimeOut(self):\n\t\treturn self._get_attribute('flowAggregatedStatResponseTimeOut')", "def pc_work_time_total(self):\n return _spacegrant_swig.general_burster_2_sptr_pc_work_time_total(self)", "def rate_limit_remaining(self):\n if os.path.isfile(self.rate_limit_filename):\n st = os.stat(self.rate_limit_filename)\n if time.time() - st.st_ctime > self.RATE_LIMIT_DURATION:\n return self.RATE_LIMIT_COUNT\n else:\n with open(self.rate_limit_filename, 'r') as f:\n failed_login_attempts = int(f.read())\n return max(0, self.RATE_LIMIT_COUNT - failed_login_attempts)\n else:\n return self.RATE_LIMIT_COUNT", "def limit(self):\n return self._limit", "def limit(self):\n return self._limit", "def pc_work_time_total(self):\n return _spacegrant_swig.hdlc_deframer_sptr_pc_work_time_total(self)", "def shared_runners_minutes_limit(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"shared_runners_minutes_limit\")", "def quota(self) -> int:\n return pulumi.get(self, \"quota\")", "def pc_work_time_total(self):\n return _spacegrant_swig.hdlc_framer_sptr_pc_work_time_total(self)", "def concurrent_tasks_limit(self):\n return self._concurrent_tasks_limit", "def GetTimeoutScale(self):\n return 30", "def pc_work_time_total(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_pc_work_time_total(self)", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time", "def get_total_cpu_time() -> float:\n me = resource.getrusage(resource.RUSAGE_SELF)\n childs = resource.getrusage(resource.RUSAGE_CHILDREN)\n return me.ru_utime + me.ru_stime + childs.ru_utime + childs.ru_stime", "def pc_work_time_total(self):\n return _spacegrant_swig.message_debug_sptr_pc_work_time_total(self)", "def timeout_in_minutes(self) -> int:\n return pulumi.get(self, \"timeout_in_minutes\")", "def _get_time_interval_in_minutes(self):\n return self.visa.get_request_interval_in_minutes()", "def GetTimeoutScale(self):\n return 30.0", "def duration( self ):\n return (self.start and time.process_time()-self.start) or 0", "def FlowStatResponseTimeOut(self):\n\t\treturn self._get_attribute('flowStatResponseTimeOut')", "async def getDelayTimeTotal(self):\n delay_time_total = await self.director.getItemVariableValue(\n self.item_id, 
\"DELAY_TIME_TOTAL\"\n )\n return delay_time_total", "def rtt_max_ms(self):\n\n return represent_seconds_in_milliseconds(self.__rtt_max)", "def get_timed(self):\n ret = self.send(\"?T\", recv=True)\n ret = int(ret, 10)\n # FIXME: range?\n assert 1 <= ret <= 9999\n return ret", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def elapsed(self):\n return datetime.datetime.now() - self.start", "def GetTimeoutScale(self):\n return 1.0", "def get_cpu_limit(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuLimit', self.handle)", "def pc_work_time_total(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_work_time_total(self)", "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def pc_work_time_total(self):\n return _spacegrant_swig.DeNRZI_sptr_pc_work_time_total(self)", "def TimeToRefill(self):\n # Get current timestamp in miliseconds from unix epoch\n now = int(time.time() * 1000)\n timeatrefile = self.status['timestamp'] + self.status['refillIn']\n\n timetorefil = timeatrefile - now + 1000 # plus one second fudge factor\n if timetorefil < 0:\n timetorefil = 0\n\n # Return value in seconds\n return timetorefil / 1000.0", "def _GetSecsUntilNextPass(self):\n op = util.CheckedOp('retrieving last GC pass time',\n self.engine.Get,\n KEY_LAST_PASS_TIME)\n\n if not op.success:\n return None\n\n last_compute = float(op.response_value) if op.response_value else 0\n return last_compute + self.gc_frequency - time.time()", "def cpu_time(self):", "def tcp_acc_limit(self):\r\n return self._arm.tcp_acc_limit", "def duration(self)->int:\n return self._lic.params['maxDurationInSeconds'].value", "def online_quota(self):\r\n return self.max_contributions - self.num_tickets_total", "def get_timeout(self):\n if self._timeout_time < 0:\n return TIMEOUT_NEVER\n elif self._timeout_time < TIMEOUT_ABSOLUTE_CUTOFF:\n return self._timeout_time\n else:\n timeout = self._timeout_time - time.time()\n if timeout < 0: timeout = 0.0 #expire immediately\n return timeout", "def fan_timer_duration(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"fan_timer_duration\"))\r\n return self._fan_timer_duration.seconds / 60", "def calc_total_wait(self, current_time_step):\n self.total_wait = current_time_step - self.time_entered\n return self.total_wait", "def time(self):\n return self._clock() - self._starttime", "def get_active_callmaxduration(self):\n try:\n obj_userprofile = UserProfile.objects.get(user=self.user)\n except UserProfile.DoesNotExist:\n return self.frequency\n\n callmaxduration = obj_userprofile.dialersetting.callmaxduration\n if callmaxduration < self.callmaxduration:\n return callmaxduration\n\n return self.callmaxduration", "def _get_max_cpu_usage(self) -> Optional[int]:\n max_cpu_usage = self._get_cgroups_max_cpu_usage()\n if not max_cpu_usage:\n # if no cgroups limit is in place, then maximum possible cpu usage depends on the number of available cpus\n max_cpu_usage = psutil.cpu_count() * 1000000 # number of cpus * microseconds in one second\n return max_cpu_usage", "def pc_work_time_total(self):\n return _spacegrant_swig.binary_sink_sptr_pc_work_time_total(self)", "def timeout(self, context):\n\n timeout = 0\n\n for task in flatten(self.tasks, context):\n task_timeout = DEFAULT_TASK_TIMEOUT\n task_details = getattr(task, '__garcon__', None)\n\n if task_details:\n task_timeout = task_details.get(\n 'timeout', DEFAULT_TASK_TIMEOUT)\n\n 
timeout = timeout + task_timeout\n\n return timeout", "def getTotalTimeOnline(self):\n return self.__totalTimeOnline", "def get_wait_time(self) -> int:\n next_ts = self.get_next_timestamp()\n if next_ts is None:\n return max(0, self.min_wait)\n return min((next_ts - parser.parse(self.event['timestamp'])).seconds, self.max_wait)", "def get_time(self):\n return self.get_timed() / 10.0", "def max_time(self) -> str:\n return self._max_time", "def _get_max_suppress_time(self):\n return self.__max_suppress_time", "def timeout(self):\n return self._timeout", "def timeout(self):\n return self._timeout", "def timeout(self):\n return self._timeout" ]
[ "0.83728045", "0.8202624", "0.7112865", "0.70121944", "0.7004678", "0.6932539", "0.67398685", "0.673961", "0.6735408", "0.672106", "0.66873527", "0.6636085", "0.66152275", "0.66026795", "0.65750027", "0.65488905", "0.6538491", "0.6528733", "0.6523565", "0.6522034", "0.6504964", "0.6504964", "0.6498144", "0.64736897", "0.6471144", "0.640793", "0.64067703", "0.640621", "0.6381395", "0.63751554", "0.6365669", "0.636343", "0.63496464", "0.6331505", "0.6328746", "0.6327548", "0.63205224", "0.6306167", "0.628293", "0.6271072", "0.6263255", "0.6258064", "0.62534183", "0.62430143", "0.6242814", "0.62370896", "0.6235399", "0.6234649", "0.62345403", "0.6233434", "0.6232813", "0.6231746", "0.6231746", "0.6229813", "0.6219914", "0.62189114", "0.6217765", "0.62149316", "0.6205185", "0.62013674", "0.6190303", "0.6182338", "0.6181725", "0.6174086", "0.61684006", "0.61485684", "0.6148501", "0.6143305", "0.6140901", "0.6139372", "0.61264986", "0.6120982", "0.6119965", "0.61196905", "0.6117366", "0.61097085", "0.61054426", "0.61049324", "0.61018246", "0.6094421", "0.6091798", "0.6090829", "0.6087915", "0.6087298", "0.6087234", "0.60832906", "0.60778064", "0.60768986", "0.607488", "0.607424", "0.606658", "0.6061047", "0.60592234", "0.6054356", "0.6053998", "0.60512424", "0.6046711", "0.6045939", "0.6045939", "0.6045939" ]
0.78830844
2
Gets the current model_time_limit
def _get_model_time_limit(self): self._validate_model_time_limit() return deepcopy(self.model_time_limit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timelimit(self):\n return self._timelimit", "def time_limit(self) -> float:\n return self._time_limit", "def _get_total_time_limit(self):\n self._validate_total_time_limit()\n if self._get_mode() == \"Optuna\":\n return None # there no training limit for model in the Optuna mode\n # just train and be happy with super models :)\n return deepcopy(self.total_time_limit)", "def time_limit(self):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n return self.time_limit_as_of_update(\n all_time_limit_updates.latest('id'))", "def timelimit_hard(self):\n return self._timelimit_hard", "def get_current_timeout(cls):\n return cls.current().get_timeout()", "def time_limit(self):\n return 2503", "def get_timeout(self):\n return self.timeout", "def get_limit(self):\n return self.limit", "def get_limit_per_second(self):\n pass", "def max_time(self):\n return self._max_time", "def get_view_rate_limit():\n return getattr(g, '_view_rate_limit', None)", "def get_limit(self):\n return self._limit", "def get_limit(self):\n return self._limit", "def _get_max_suppress_time(self):\n return self.__max_suppress_time", "def max_timeout(self):\n return self._max_timeout", "def max_time(self) -> str:\n return self._max_time", "def gettimeout(self):\r\n return self._timeout", "def get_timeout(self) -> int:", "def limit(self):\n return self._limit", "def limit(self):\n return self._limit", "def check_engine_limits(current_rqmt, task):\n current_rqmt['time'] = min(168, current_rqmt.get('time', 1))\n return current_rqmt", "def get_rate_limit(self):\n resp = self._session.get(self.API_ROOT + \"/rate_limit\")\n log.info(resp.text)", "def MaxWaitTime(self):\r\n\t\treturn self._get_attribute('maxWaitTime')", "def max_delay_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_delay_time\")", "def max_delay_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_delay_time\")", "def limit(self):\n return self._owner.plan", "def _get_time_interval_in_minutes(self):\n return self.visa.get_request_interval_in_minutes()", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def timeout(self):\n return self._timeout", "def timeout(self):\n return self._timeout", "def timeout(self):\n return self._timeout", "def timeout(self):\n return self._timeout", "def get_max_record_limit(self):\n return self.max_record_limit", "def get_maxdelay(self) -> float:\n return self.maxdelay", "def time_limit(self, time_limit: float):\n if time_limit is not None and time_limit < 1.0: # noqa: E501\n raise ValueError(\"Invalid value for `time_limit`, must be a value greater than or equal to `1.0`\") # noqa: E501\n\n self._time_limit = time_limit", "def get_time(self):\n return self.time_param", "def max_time(self):\n return self._ll_tree_sequence.get_max_time()", "def _get_recordTtl(self):\n return self.__recordTtl", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def max_delay_time(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"max_delay_time\")", "def GetTimeoutScale(self):\n return 30", "def last_optime(self):\n return self._last_optime", "def getdefaulttimeout():\r\n return default_timeout", "def timeout(self):\n return self._data.get('timeout')", "def _get_next_limit(self):\n return self.__quota", "def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = 
resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max", "def QueueStatResponseTimeOut(self):\n\t\treturn self._get_attribute('queueStatResponseTimeOut')", "def timeout(self):\n\n return self._timeout", "def timeout(self):\n\n return self._timeout", "def get_default_timeout(self):\n return self._timeout", "def set_timelimit(self, timelimit):\n self._timelimit = timelimit", "def unlock_time(self, obj):\n unlock_duration = datetime.timedelta(minutes=RateLimitModelBackend.minutes)\n return obj.modified + unlock_duration", "def capture_limit(self):\n return self._capture_limit", "def size_limit(self):\n\t\treturn self._size_limit", "def max_retire_time(self):\n return self._max_retire_time", "def limit(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"limit\")", "def use_time(self) -> int:\n return self._use_time", "def timeout_in_minutes(self) -> int:\n return pulumi.get(self, \"timeout_in_minutes\")", "def timeout(self) -> str:\n return pulumi.get(self, \"timeout\")", "def shared_runners_minutes_limit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"shared_runners_minutes_limit\")", "def shared_runners_minutes_limit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"shared_runners_minutes_limit\")", "def GetTimeoutScale(self):\n return 30.0", "def MaxLifetime(self):\r\n\t\treturn self._get_attribute('maxLifetime')", "def get_timeout(self):\r\n a = self.get_attributes('VisibilityTimeout')\r\n return int(a['VisibilityTimeout'])", "def get_time(self):\n return self._current_time_sec", "def time_limit(self, limit=None):\n if limit is None:\n done, data = self._request('G3')\n if done:\n return int(data[0])*15\n else:\n limit = int(round(limit/15.0))\n if self._request('S3', str(limit))[0]:\n return limit\n\n raise EvseError", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def gettimeout(self):\r\n return self.sock.gettimeout()", "def gettimeout(self):\r\n return self.sock.gettimeout()", "def get_current_time(self):\n return self.time", "def timeSinceQuery(self):\n return self.time - datetime.now().time()", "def GetTimeoutScale(self):\n return 1.0", "def get_throttle_interval():\n return const.THROTTLE_INTERVAL", "def _get_end_time(self):\n return self.__end_time", "def get_timeout_millis(self):\n return self.dp.get_timeout_millis()", "def adaptive_limit(self) -> int:\n return pulumi.get(self, \"adaptive_limit\")", "def concurrent_tasks_limit(self):\n return self._concurrent_tasks_limit", "def get_timeout(self):\n if self._timeout_time < 0:\n return TIMEOUT_NEVER\n elif self._timeout_time < TIMEOUT_ABSOLUTE_CUTOFF:\n return self._timeout_time\n else:\n timeout = self._timeout_time - time.time()\n if timeout < 0: timeout = 0.0 #expire immediately\n return timeout", "def FlowStatResponseTimeOut(self):\n\t\treturn self._get_attribute('flowStatResponseTimeOut')", "def time_from_now(self, **options):\n return self.time_from(self.now())", "def gettime(self):\n return self.t", "def get_time(self) -> float:\n raise NotImplementedError()", "def rtt_max_ms(self):\n\n return represent_seconds_in_milliseconds(self.__rtt_max)", "def get_min_tim(self):\n return self.get_shortest_mode().tim", "def last_tick_time(self):\n return self.last_tick_", "def limit(self):\n if self._limit:\n return self._limit\n else: # no custom limit, go with the 
default\n return PublicAppPlan", "def set_time_limit(arg):\n pass", "def QueueConfigResponseTimeOut(self):\n\t\treturn self._get_attribute('queueConfigResponseTimeOut')", "def lasttime(self):\n if hasattr(self, \"_lasttime\"):\n return self._lasttime\n else:\n return None", "def time_to_target(self) -> str:\r\n # TODO: Figure out how to implement this.\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"time_to_target\"))\r\n return self._time_to_target_options[0]", "def option_request_wait_time(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionRequestWaitTime/')))", "def get_request_limit(self, access_token):\n url = \"{0}/rate_limit?access_token={1}\"\n response = requests.get(url.format(self.ROOT_API_URL, access_token))\n data = response.json()\n return data['resources']['core'].get(\"remaining\")", "def timeout_minutes(self) -> Optional[int]:\n return pulumi.get(self, \"timeout_minutes\")", "def get_limit(self):\r\n\r\n limit = self.request_data.get('limit', self.limit)\r\n if limit is None:\r\n limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)\r\n\r\n try:\r\n limit = int(limit)\r\n except ValueError:\r\n raise BadRequest(\r\n \"Invalid limit '%s' provided. Please provide a positive integer.\" % limit)\r\n\r\n if limit < 0:\r\n raise BadRequest(\"Invalid limit '%s' provided. Please provide a positive integer >= 0.\" % limit)\r\n\r\n if self.max_limit and (not limit or limit > self.max_limit):\r\n # If it's more than the max, we're only going to return the max.\r\n # This is to prevent excessive DB (or other) load.\r\n return self.max_limit\r\n\r\n return limit", "def _get_delay(self):\n delay = int(60 / self.REQUESTS_PER_MINUTE * len(accounts.get_all()))\n return delay", "def _get_delay(self):\n delay = int(60 / self.REQUESTS_PER_MINUTE * len(accounts.get_all()))\n return delay", "def timeout(self) -> Optional[float]:\n return self._timeout", "def GetAntLimit(cls):\n return cls.antLimit" ]
[ "0.808775", "0.7704996", "0.73726135", "0.7298717", "0.72923505", "0.6901317", "0.6841979", "0.66637945", "0.6617291", "0.6602201", "0.6576346", "0.6545491", "0.6522294", "0.6522294", "0.62877345", "0.6281024", "0.62770474", "0.6243926", "0.62277204", "0.62133306", "0.62133306", "0.61373436", "0.6106775", "0.61018616", "0.60987335", "0.60987335", "0.5951156", "0.5906317", "0.5894278", "0.5867684", "0.5867684", "0.5867684", "0.5867684", "0.58561295", "0.58528674", "0.58488715", "0.58234304", "0.5801187", "0.5785137", "0.57784045", "0.5755732", "0.57494473", "0.5748576", "0.57396555", "0.5734709", "0.5730458", "0.57282555", "0.5726176", "0.5724839", "0.5724839", "0.5721367", "0.5691691", "0.56908286", "0.5683591", "0.5666476", "0.56608313", "0.56496656", "0.56112653", "0.56109136", "0.56090105", "0.5603286", "0.5603286", "0.55954", "0.5580811", "0.55748564", "0.55696803", "0.5557213", "0.5548149", "0.5548149", "0.5544891", "0.5544891", "0.5541083", "0.55391043", "0.5536223", "0.5532332", "0.55227065", "0.5519373", "0.5517108", "0.5515588", "0.55134964", "0.55051947", "0.54936814", "0.5492957", "0.5491744", "0.5489327", "0.54882956", "0.5483704", "0.54820675", "0.5480454", "0.54761916", "0.5473458", "0.5472664", "0.5462756", "0.54627556", "0.5459763", "0.54569906", "0.5455067", "0.5455067", "0.5451541", "0.5451514" ]
0.8786821
0
Gets the current algorithms. If "auto" it is determined
def _get_algorithms(self): self._validate_algorithms() if self.algorithms == "auto": if self._get_mode() == "Explain": return [ "Baseline", "Linear", "Decision Tree", "Random Forest", "Xgboost", "Neural Network", ] if self._get_mode() == "Perform": return [ "Linear", "Random Forest", "LightGBM", "Xgboost", "CatBoost", "Neural Network", ] if self._get_mode() == "Compete": return [ "Decision Tree", "Linear", "Random Forest", "Extra Trees", "LightGBM", "Xgboost", "CatBoost", "Neural Network", "Nearest Neighbors", ] if self._get_mode() == "Optuna": return [ "Random Forest", "Extra Trees", "LightGBM", "Xgboost", "CatBoost", "Neural Network", ] else: return deepcopy(self.algorithms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms", "def get_algorithm(self):\n return self.alg", "def get_algorithm(self):\n pass", "def algorithms(self):\n if self._algorithms is None:\n uri = \"/loadbalancers/algorithms\"\n resp, body = self.method_get(uri)\n self._algorithms = [alg[\"name\"] for alg in body[\"algorithms\"]]\n return self._algorithms", "def get_alg(self):\r\n raise NotImplementedError", "def _get_algorithm(self, **options):\n\n raise CoreNotImplementedError()", "def algorithm(self) -> str:\n return pulumi.get(self, \"algorithm\")", "def algorithm(self) -> str:\n return pulumi.get(self, \"algorithm\")", "def algorithm(self):\n return self._algorithm", "def algorithms_factory():\n all_algorithms = []\n for algorithm_module in ALGORITHMS:\n module_name = \"{}.{}\".format(PREFIX, algorithm_module)\n module = importlib.import_module(module_name)\n for item in dir(module):\n item = getattr(module, item)\n try:\n if issubclass(item, base.Algorithm):\n item.is_implemented()\n else:\n continue\n except (exceptions.AlgorithmsNotImplemented, TypeError):\n continue\n\n all_algorithms.append(item)\n\n return all_algorithms", "def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail", "def get_algorithm(self):\n if self.ALGO_INFO not in self._data_dict:\n return None\n algo_group = dao.find_group(self._data_dict[self.ALGO_INFO]['module'], \n self._data_dict[self.ALGO_INFO]['class'],\n self._data_dict[self.ALGO_INFO]['init_param'])\n if algo_group:\n algorithm = dao.get_algorithm_by_group(algo_group.id, self._data_dict[self.ALGO_INFO]['identifier'])\n return algorithm\n return None", "def algorithm(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"algorithm\")", "def Algorithm(self):\n return self._get_attribute('algorithm')", "def algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"algorithm\")", "def algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"algorithm\")", "def __top_Algs_ ( self ) :\n _algs = self.TopAlg\n\n def _alg_name_ ( _n ):\n\n _p = _n.rfind('/')\n if 0 > _p : return _n\n return _n[_p:]\n\n def _pyAlg ( _n ) :\n for _a in self.pyalgorithms :\n if _n == _a.name() : return _a\n return None \n \n algs = [] \n for _a in _algs :\n # get the proper name \n _n = _alg_name_ ( _a )\n # check if it is pyalgorithm:\n _pa = _pyAlg ( _n )\n if _pa :\n algs += [ _pa ]\n else :\n _alg = self.algorithm ( _a , True )\n algs += [ _alg ]\n \n return algs", "def __top_Algs_ ( self ) :\n _algs = self.TopAlg\n\n def _alg_name_ ( _n ):\n\n _p = _n.rfind('/')\n if 0 > _p : return _n\n return _n[_p:]\n\n def _pyAlg ( _n ) :\n for _a in self.pyalgorithms :\n if _n == _a.name() : return _a\n return None \n \n algs = [] \n for _a in _algs :\n # get the proper name \n _n = _alg_name_ ( _a )\n # check if it is pyalgorithm:\n _pa = _pyAlg ( _n )\n if _pa :\n algs += [ _pa ]\n else :\n _alg = self.algorithm ( _a , True )\n algs += [ _alg ]\n \n return algs", "def __all_Algs_ ( self ) :\n _algs = self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def __all_Algs_ ( self ) :\n _algs = 
self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def _get_algorithm(self, **options):\n\n return 'TEST'", "def alg(self) -> t.Optional[str]:\n return self._alg", "def initialize_algorithm(algo_cls: Type):\n if algo_cls == algorithms.Alibi:\n return algo_cls(max_sequence_length=1)\n elif algo_cls == algorithms.StochasticDepth:\n return algo_cls(target_layer_name='ResNetBottleneck')\n elif algo_cls == algorithms.FusedLayerNorm or algorithms.LowPrecisionLayerNorm:\n pytest.importorskip('apex')\n return algo_cls()\n elif algo_cls == algorithms.GatedLinearUnits:\n pytest.importorskip('transformers')\n return algo_cls()\n elif algo_cls == algorithms.Factorize:\n return algo_cls(min_features=48, latent_features=24)\n elif algo_cls == algorithms.SqueezeExcite:\n return algo_cls(min_channels=32)\n else:\n return algo_cls()", "def get_session_algorithms(self): # real signature unknown; restored from __doc__\n return \"\"", "def get_common_algorithm(external, prefered=None):\n if prefered is not None:\n if prefered in external:\n return prefered\n for alg in ALGORITHMS:\n if alg in external:\n return alg\n raise ValueError(\"No common algorithm found\")", "def get_algo_info(self, algo=None, **kwargs):\n if algo:\n return self.mrr_obj.get('/info/algos' + '/' + algo, **kwargs)\n return self.mrr_obj.get('/info/algos')", "def _get_algorithm(name: str) -> Any:\n algo_cls = getattr(hashes, name.upper(), None) # hack: get class object by name\n if algo_cls is None:\n raise ValueError(f'Unsupported algorithm: hashes.{name}'.format(name=name.upper()))\n\n return algo_cls() # pylint: disable=not-callable", "def algorithm(self):\n\n if self.__algorithm:\n return self.__algorithm\n if isinstance(self, Map): return None\n return self.ancestor.algorithm", "def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. 
\"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}", "def get_tracker(algorithm):\n if algorithm == \"default\":\n algorithm = \"peaks\"\n\n if algorithm == \"peaks\":\n from fingertracker_peaks import FingerTrackerPeaks\n return FingerTrackerPeaks\n elif algorithm == \"skeleton\":\n from fingertracker_skeleton import FingerTrackerSkeleton\n return FingerTrackerSkeleton\n else:\n print \"Unknown algorithm: {}\".format(algorithm)", "def generators(self, algorithm=\"farey\"):\n if self.level() == 1:\n # we return a fixed set of generators for SL2Z, for historical\n # reasons, which aren't the ones the Farey symbol code gives\n return [ self([0,-1,1,0]), self([1,1,0,1]) ]\n\n elif algorithm==\"farey\":\n return self.farey_symbol().generators()\n\n elif algorithm==\"todd-coxeter\":\n from sage.modular.modsym.p1list import P1List\n from .congroup import generators_helper\n level = self.level()\n if level == 1: # P1List isn't very happy working mod 1\n return [ self([0,-1,1,0]), self([1,1,0,1]) ]\n gen_list = generators_helper(P1List(level), level)\n return [self(g, check=False) for g in gen_list]\n\n else:\n raise ValueError(\"Unknown algorithm '%s' (should be either 'farey' or 'todd-coxeter')\" % algorithm)", "def choose_algorithm(self):\n\n # if case insensitive lowercase patterns\n for i in range(len(self.patterns)):\n if self.case_insensitive:\n self.patterns[i] = self.patterns[i].lower()\n\n # naive matcher option\n if self.naive:\n matcher = NaiveStringMatcher(self.patterns)\n return matcher\n\n # AHC matcher by default\n matcher = State.create_automaton(self.patterns)\n return matcher", "def get_custom_algorithms_to_run(current_skyline_app, base_name, custom_algorithms, debug):\n if debug:\n current_skyline_app_logger = current_skyline_app + 'Log'\n current_logger = logging.getLogger(current_skyline_app_logger)\n else:\n current_logger = None\n\n custom_algorithms_to_run = {}\n for custom_algorithm in custom_algorithms:\n use_with_current_skyline_app = False\n try:\n use_with = custom_algorithms[custom_algorithm]['use_with']\n if current_skyline_app in use_with:\n use_with_current_skyline_app = True\n except:\n use_with_current_skyline_app = None\n if not use_with_current_skyline_app:\n continue\n namespaces = []\n try:\n namespaces = custom_algorithms[custom_algorithm]['namespaces']\n except:\n namespaces = []\n algorithm_source = None\n consensus = None\n algorithms_allowed_in_consensus = []\n run_custom_algorithm = False\n max_execution_time = None\n if namespaces:\n for namespace in namespaces:\n if not run_custom_algorithm:\n try:\n run_custom_algorithm, run_custom_algorithm_matched_by = matched_or_regexed_in_list(current_skyline_app, base_name, [namespace])\n except:\n pass\n if run_custom_algorithm:\n if debug:\n current_logger.debug('debug :: get_custom_algorithms_to_run :: %s - namespace - %s, run_custom_algorithm_matched_by %s' % (\n base_name, str(namespace), str(run_custom_algorithm_matched_by)))\n break\n if run_custom_algorithm:\n try:\n algorithm_source = custom_algorithms[custom_algorithm]['algorithm_source']\n except:\n algorithm_source = None\n try:\n algorithm_parameters = custom_algorithms[custom_algorithm]['algorithm_parameters']\n except:\n algorithm_parameters = {}\n try:\n max_execution_time = custom_algorithms[custom_algorithm]['max_execution_time']\n except:\n max_execution_time = 0.05\n try:\n consensus = 
int(custom_algorithms[custom_algorithm]['consensus'])\n except:\n consensus = 0\n try:\n algorithms_allowed_in_consensus = custom_algorithms[custom_algorithm]['algorithms_allowed_in_consensus']\n except:\n algorithms_allowed_in_consensus = []\n try:\n debug_logging = custom_algorithms[custom_algorithm]['debug_logging']\n except:\n debug_logging = False\n if debug:\n current_logger.debug('debug :: get_custom_algorithms_to_run :: %s - custom_algorithm - %s, max_execution_time - %s' % (\n base_name, str(custom_algorithm), str(max_execution_time)))\n if run_custom_algorithm and algorithm_source:\n try:\n custom_algorithms_to_run[custom_algorithm] = {\n 'namespaces': namespaces,\n 'algorithm_source': algorithm_source,\n 'algorithm_parameters': algorithm_parameters,\n 'max_execution_time': max_execution_time,\n 'consensus': consensus,\n 'algorithms_allowed_in_consensus': algorithms_allowed_in_consensus,\n 'debug_logging': debug_logging\n }\n if debug:\n current_logger.debug('debug :: get_custom_algorithms_to_run :: %s - custom_algorithms_to_run - %s' % (\n base_name, str(custom_algorithms_to_run)))\n except:\n if debug:\n current_logger.error(traceback.format_exc())\n current_logger.error('error :: get_custom_algorithms_to_run :: failed to create dict for %s' % (\n base_name))\n else:\n pass\n return custom_algorithms_to_run", "def get_algoIO(algo):\n algoIOFile = get_data_in_paths('algoIO.json', search_path)\n with open(algoIOFile) as algoIO:\n algoIODic = json.load(algoIO)\n if algo in algoIODic.keys():\n return algoIODic[algo]\n else:\n return {'decision': 'unknown algorithm'}", "def key_algorithm(self) -> str:\n return pulumi.get(self, \"key_algorithm\")", "def get_sb_class_for_algo(algo: str) -> BaseAlgorithm:\n if algo == 'AC2':\n return A2C\n elif algo == 'DDPG':\n return DDPG\n elif algo == 'DQN':\n return DQN\n elif algo == 'HER':\n return HER\n elif algo == 'PPO':\n return PPO\n elif algo == 'SAC':\n return SAC\n elif algo == 'TD3':\n return TD3\n else:\n raise NotImplementedError(f'The {algo} sb algorithm is not yet supported!')", "def _get_modes(self):\n return self.__modes", "def get_tools(cls):\n pass", "def algorithm(self):\n raise NotImplementedError(\n \"Please implement algorithm specific algorithm @property method\"\n )", "def identify_algorithm(model_initializer):\n # FLAG: Will need different way to handle neural network libraries (keras, pytorch, skorch)\n\n try:\n if isinstance(model_initializer, partial):\n algorithm_name = model_initializer.func.__name__\n else:\n algorithm_name = model_initializer.__name__\n except AttributeError:\n algorithm_name = type(model_initializer).__name__\n\n try:\n module_name = model_initializer.__module__.split(\".\")[0]\n except AttributeError:\n module_name = model_initializer.func.__module__.split(\".\")[0]\n\n return algorithm_name, module_name", "def _choose_algorithm(vehicle, xbee, neighbors):\n if shared.AGENT_CHARACTER == 'square':\n return mas.SquareRoute(vehicle, xbee)\n \n elif shared.AGENT_CHARACTER == 'passive':\n return mas.PassiveLeader(vehicle, xbee)\n \n elif shared.CURRENT_ALGORITHM == 'Vicsek':\n return mas.Vicsek(vehicle, xbee, neighbors)\n \n elif shared.CURRENT_ALGORITHM == 'MPC':\n return mas.Decentralized(vehicle, xbee, neighbors)", "def algorithmInfo():\n\t\treturn r\"\"\"TODO\"\"\"", "def get_sklearn_algorithms(verbose=False):\n from collections import defaultdict\n import importlib\n import sklearn\n algos = defaultdict(list)\n if verbose: print(dir(sklearn))\n for nom_module in dir(sklearn):\n if verbose: 
print(nom_module)\n try:\n to_import = \"sklearn.%s\" % nom_module\n module = importlib.import_module(to_import)\n for nom_fonction in dir(module):\n fonction = getattr(module, nom_fonction)\n if hasattr(fonction, \"fit\"):\n if verbose: print(\" nom algorithme = \", nom_fonction)\n algos[nom_module].append(fonction)\n except Exception as e:\n if verbose: print(e)\n if verbose: print(\"=\" * 30)\n return algos", "def _interface_methods_ ( self ) :\n \n if not self._itool : return tuple()\n \n my_methods = list ( dir ( GaudiPython.Bindings.iAlgTool ) )\n my_methods += [ i for i in dir ( cpp.IAlgTool ) ] \n my_methods += [ i for i in dir ( cpp.IInterface ) ]\n my_methods += [ i for i in dir ( cpp.IStateful ) ]\n if self._ip : my_methods += [ i for i in dir ( self._ip ) ]\n my_methods = set( my_methods )\n if_methods = set() \n for i in dir( self._itool ) :\n if i in my_methods : continue\n if_methods.add ( i )\n \n return tuple( if_methods )", "def describe_algorithm(AlgorithmName=None):\n pass", "def available_functions(self):\n return self.config.keys()", "def choose_algorithm(self,\n algorithm: str) -> None:\n # Initialize the empty string for selected algorithm\n self.selected_algorithm = ''\n\n # Executing the selected algorithm with respect to value of string\n if algorithm is not None:\n # First string value indicates the backbone of approach\n backbone = algorithm.split('-')[0]\n # Check the possible options\n if backbone == 'KNN' and len(algorithm.split('-')) == 3:\n print('KNN model based collaborative approach is selected!')\n similarity_measure = algorithm.split('-')[1]\n user_based = algorithm.split('-')[2]\n if (similarity_measure == 'cosine' or similarity_measure == 'msd' or similarity_measure == 'pearson' or similarity_measure == 'pearson_baseline') and (user_based == 'user'):\n self.selected_algorithm = KNNBaseline(sim_options={'name': similarity_measure,\n 'user_based': True})\n elif (similarity_measure == 'cosine' or similarity_measure == 'msd' or similarity_measure == 'pearson' or similarity_measure == 'pearson_baseline') and (user_based == 'item'):\n self.selected_algorithm = KNNBaseline(sim_options={'name': similarity_measure,\n 'user_based': False})\n else:\n raise ValueError('The given parameters {} for KNN algorithm is not recognized. Please check the propoer format'.format(str(backbone) + '-' + str(similarity_measure) + '-' + str(user_based)))\n elif backbone == 'SVD' and len(algorithm.split('-')) == 3:\n print('SVD model based collaborative approach is selected!')\n epoch = int(algorithm.split('-')[1])\n learning_rate = float(algorithm.split('-')[2])\n self.selected_algorithm = SVD(n_epochs=epoch, lr_all=learning_rate)\n elif backbone == '':\n raise ValueError('Please give an algorithm! Algorithm cannot be left empty')\n else:\n raise ValueError(\n 'The given algorithm format {} is not recognized. Please check the propoer format'.format(\n str(backbone)))\n elif algorithm is None:\n # Raise an error message for None value for algorithm\n raise ValueError('Please give an algorithm! 
Algorithm cannot be left empty')\n return", "def algorithm(self, alg, language):\n with open('solve-manual.json') as f:\n manual = json.load(f)\n\n solution = []\n for notation in alg.split(' '):\n solution.append(manual[language][notation])\n return solution", "def bookAlgorithms(config, visitor):\n import imp\n import os\n\n CLI = config.getFolder(\"CLI+\")\n # flag indicating to run a robust analysis\n robust = CLI.getTagBoolDefault(\"robust\",False)\n # flag indicating to run a dummy analysis\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n # book any algorithms\n for algorithmscript_TString in config.getTagVString(\"algorithms.snippets\"):\n QFramework.TQStringUtils.removeLeadingBlanks(algorithmscript_TString)\n QFramework.TQStringUtils.removeTrailingBlanks(algorithmscript_TString)\n QFramework.TQStringUtils.removeTrailingText(algorithmscript_TString, \".py\")\n algorithmscript = algorithmscript_TString.Data()\n found_modules = []\n algorithmsDirs = config.getTagVStandardString(\"algorithms.directories\")\n # search through the directories provided in the config\n for algorithmsPath in algorithmsDirs:\n module = QFramework.TQFolder.concatPaths(algorithmsPath, algorithmscript) + \".py\"\n module = common.findConfigPath(module, False)\n # findConfigPath returns \"\" if no module was found\n if len(module) > 0:\n # snippet was found in this directory -\n # add its absolute path and the directory it was found in\n # to a list in tuple form\n found_modules.append((module, algorithmsPath))\n if len(found_modules) == 0:\n # check CommonAnalysisHelpers for an algorithm snippet as fall-back\n CAHAlgorithmsDir = \"CommonAnalysisHelpers/share/algorithms\"\n algorithmsDirs.push_back(CAHAlgorithmsDir)\n module = QFramework.TQFolder.concatPaths(CAHAlgorithmsDir, algorithmscript) + \".py\"\n module = QFramework.TQPathManager.findFileFromEnvVarWithoutExecDir(module, \"CAFCOREDIR\")\n if len(module) > 0:\n found_modules.append((module, CAHAlgorithmsDir))\n print(len(found_modules))\n # continue only if there was one match found\n if len(found_modules) == 0:\n QFramework.BREAK(\"No module found for '{:s}' in the custom algorithm directories provided:\\n{:s}\\n\".format(algorithmscript,', '.join(algorithmsDirs))+\n \"Please make sure that there exists a snippet by the name of '{:s}.py' available in one of them.\\n\".format(algorithmscript))\n elif len(found_modules) > 1:\n QFramework.BREAK(\"Ambiguity detected while resolving custom algorithm snippet location. 
Multiple modules found for {:s} in the custom algorithm directories provided:\\n{:s}\\n\".format(algorithmscript,', '.join(algorithmsDirs))+\n \"Consider placing the {:s}.py snippet only in a common directory if it's used by more than one (sub)analysis.\".format(algorithmscript))\n abs_path = found_modules[0][0]\n module_name = os.path.basename(abs_path).rstrip(\".py\")\n relative_path = QFramework.TQFolder.concatPaths(found_modules[0][1], algorithmscript)+\".py\"\n QFramework.START(\"l.\",\"loading algorithms from '{:s}'\".format(str(relative_path)))\n try:\n addalgorithms = imp.load_source(module_name, abs_path)\n added = addalgorithms.addAlgorithms(visitor,config)\n if added:\n QFramework.END(QFramework.TQMessageStream.OK)\n else:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to properly setup custom algorithms\")\n except IOError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to open file '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except NameError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"syntax error in algorithm snippet '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except AttributeError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"attribute error in algorithm snippet '{:s}' - please double-check!\\n\".format(abs_path)+\n \"If the message from python below is\\n'module' object has no attribute 'addAlgorithms'\\nplease make sure that the snippet has the function addAlgorithms() defined.\\n\"\n \"Message from python:\\n\"+str(error))\n\n # only try and do the xAOD skimming configuration below if we are running with the MCASV\n # since all channels should be considered at the same time (e.g. 
systematic variations)\n if isinstance(visitor,QFramework.TQMultiChannelAnalysisSampleVisitor):\n # TODO: these two lines are also done in bookAnalysisJobs\n xAODdumpingConfig = QFramework.TQTaggable()\n dumpXAODs = (xAODdumpingConfig.importTagsWithoutPrefix(config,\"xAODdumping.\") > 0)\n\n jobID = CLI.getTagStringDefault(\"jobID\",\"analyze\")\n\n #add xAODskimmingAlgorithm if requested (only for MCASV as we'd have event duplications otherwise!)\n #note: if we ever implement an option to limit the number of channels executed at the same time we must ensure this does not run in such a configuration!!!!\n if dumpXAODs:\n print(\"Setting up xAOD skimming Algorithm...\")\n xAODskimmingAlg = ROOT.TQxAODskimmingAlgorithm()\n xAODskimmingAlg.SetName(\"xAODdumper\")\n if xAODdumpingConfig.hasTag(\"flagName\"): xAODskimmingAlg.setFlagName(xAODdumpingConfig.getTagStringDefault(\"flagName\",\"\"))\n xAODskimmingAlg.setOutputDir( xAODdumpingConfig.getTagStringDefault(\"outputDir\",\"CAFxAODs\") )\n xAODskimmingAlg.setFilePrefix(jobID+\"_\")\n if config.hasTag(\"nameTagName\") : xAODskimmingAlg.setPrefix( config.getTagStringDefault( ROOT.TString(\"aliases.\")+config.getTagStringDefault(\"nameTagName\",\"\"), \"\" ) )\n visitor.addAlgorithm( xAODskimmingAlg )", "def signatureAlgorithm(self) -> str:\n sig_algo = self['signature_algorithm'].signature_algo\n return sig_algo", "def get_algorithm_functionality(\n model: MLTypes.ModelType = None,\n y: MLTypes.DatasetType = None,\n objective: str = None,\n ) -> AlgorithmFunctionality:\n # Check if LightGBM is being used with SciKit-Learn API:\n if objective is None:\n return super().get_algorithm_functionality(model=model, y=y)\n\n # Declare the conversion map according to the LightGBM docs:\n objective_to_algorithm_functionality_map = {\n # regression application:\n \"regression\": AlgorithmFunctionality.REGRESSION,\n \"regression_l2\": AlgorithmFunctionality.REGRESSION,\n \"l2\": AlgorithmFunctionality.REGRESSION,\n \"mean_squared_error\": AlgorithmFunctionality.REGRESSION,\n \"mse\": AlgorithmFunctionality.REGRESSION,\n \"l2_root\": AlgorithmFunctionality.REGRESSION,\n \"root_mean_squared_error\": AlgorithmFunctionality.REGRESSION,\n \"rmse\": AlgorithmFunctionality.REGRESSION,\n \"regression_l1\": AlgorithmFunctionality.REGRESSION,\n \"l1\": AlgorithmFunctionality.REGRESSION,\n \"mean_absolute_error\": AlgorithmFunctionality.REGRESSION,\n \"mae\": AlgorithmFunctionality.REGRESSION,\n \"huber\": AlgorithmFunctionality.REGRESSION,\n \"fair\": AlgorithmFunctionality.REGRESSION,\n \"poisson\": AlgorithmFunctionality.REGRESSION,\n \"quantile\": AlgorithmFunctionality.REGRESSION,\n \"mape\": AlgorithmFunctionality.REGRESSION,\n \"mean_absolute_percentage_error\": AlgorithmFunctionality.REGRESSION,\n \"gamma\": AlgorithmFunctionality.REGRESSION,\n \"tweedie\": AlgorithmFunctionality.REGRESSION,\n # binary classification application:\n \"binary\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n # multi-class classification application:\n \"multiclass\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"softmax\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"multiclassova\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"multiclass_ova\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"ova\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"ovr\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n # cross-entropy application\n \"cross_entropy\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n \"xentropy\": 
AlgorithmFunctionality.BINARY_CLASSIFICATION,\n \"cross_entropy_lambda\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n \"xentlambda\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n # ranking application\n \"lambdarank\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"rank_xendcg\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xendcg\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xe_ndcg\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xe_ndcg_mart\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xendcg_mart\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n }\n\n # Return unknown if the objective is not in the map and otherwise return its functionality:\n if objective not in objective_to_algorithm_functionality_map:\n raise AlgorithmFunctionality.UNKNOWN\n return objective_to_algorithm_functionality_map[objective]", "def list_algorithms(CreationTimeAfter=None, CreationTimeBefore=None, MaxResults=None, NameContains=None, NextToken=None, SortBy=None, SortOrder=None):\n pass", "def train(self):\n\n if self.Algorithms.startswith(\"TMVA:\"):\n self.trainTMVAMethods()\n elif self.Algorithms.startswith(\"SKL:\"):\n self.trainSKLMethods()\n elif self.Algorithms.startswith(\"TF:\"):\n self.trainTFMethods()\n else:\n print(\"ERROR: Unknown algorithm: {}\".format(self.Algorithms))\n\n return", "def get_winners(self):\n\n if self.optimal is not None:\n return self.optimal\n clean_proposals = self.cleaner.create_scenarios(self.proposals)\n self.optimal = self.optimizer.optimize(clean_proposals)\n return self.optimal", "def tunnel1_phase2_integrity_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase2_integrity_algorithms\")", "def tunnel1_phase2_integrity_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase2_integrity_algorithms\")", "def tunnel1_phase1_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase1_encryption_algorithms\")", "def tunnel1_phase1_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase1_encryption_algorithms\")", "def algorithm(self) -> str:\n return self.auth_type.name", "def algorithm_config(self) -> Optional['outputs.AiIndexMetadataConfigAlgorithmConfig']:\n return pulumi.get(self, \"algorithm_config\")", "def bvp_algorithm(name, **kwargs):\n # Load algorithm from the package\n for algorithm in available_algorithms:\n if name.lower() == algorithm.__name__.lower():\n return algorithm(**kwargs)\n else:\n # Raise exception if the loop completes without finding an algorithm by the given name\n raise ValueError('Algorithm ' + name + ' not found')", "def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }", "def supported_operation_modes(\n self,\n ) -> list[HVACModeT]:", "def current ( self ) :\n if not self._ictxs :\n raise RuntimeError , \" Invalid pointer to IIncidentSvc \"\n # \n curr = self._ictxs.currentAlg()\n if not curr : return None\n # \n return iAlgorithm ( curr.name() , curr )", "def current ( self ) :\n if not self._ictxs :\n raise RuntimeError , \" Invalid pointer to IIncidentSvc \"\n # \n curr = self._ictxs.currentAlg()\n if 
not curr : return None\n # \n return iAlgorithm ( curr.name() , curr )", "def get_all_parsers():\n return [OptimizerFactory.get_parser(optimizer) for optimizer in OptimizerFactory.optimizers]", "def tunnel1_phase2_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase2_encryption_algorithms\")", "def tunnel1_phase2_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase2_encryption_algorithms\")", "def tunnel2_phase1_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase1_encryption_algorithms\")", "def tunnel2_phase1_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase1_encryption_algorithms\")", "def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' 
\n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension", "def tunnel2_phase1_integrity_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase1_integrity_algorithms\")", "def tunnel2_phase1_integrity_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase1_integrity_algorithms\")", "def supported_modes(self) -> Set[str]:\n raise NotImplementedError", "def tunnel1_phase1_integrity_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase1_integrity_algorithms\")", "def tunnel1_phase1_integrity_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase1_integrity_algorithms\")", "def init_algorithm(self):\n pass", "def _set_up_pacman_algorithm_listings(\n self, algorithms, optional_algorithms, xml_paths, inputs,\n required_outputs):\n\n # deduce if the algorithms are internal or external\n algorithms_names = list(algorithms)\n\n # set up XML reader for standard PACMAN algorithms XML file reader\n # (used in decode_algorithm_data_objects function)\n xml_paths.append(os.path.join(\n os.path.dirname(operations.__file__),\n \"algorithms_metadata.xml\"))\n xml_paths.append(os.path.join(\n os.path.dirname(algorithm_reports.__file__),\n \"reports_metadata.xml\"))\n\n converter_xml_path = list()\n converter_xml_path.append(os.path.join(\n os.path.dirname(file_format_converters.__file__),\n \"converter_algorithms_metadata.xml\"))\n\n # decode the algorithms specs\n xml_decoder = ConvertAlgorithmsMetadata(xml_paths)\n algorithm_data_objects = xml_decoder.decode_algorithm_data_objects()\n xml_decoder = ConvertAlgorithmsMetadata(converter_xml_path)\n converter_algorithm_data_objects = \\\n xml_decoder.decode_algorithm_data_objects()\n\n # filter for just algorithms we want to use\n algorithm_data = self._get_algorithm_data(\n algorithms_names, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas = self._get_algorithm_data(\n optional_algorithms, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas.extend(\n converter_algorithm_data_objects.values())\n\n # sort_out_order_of_algorithms for execution\n self._sort_out_order_of_algorithms(\n inputs, required_outputs, algorithm_data,\n optional_algorithms_datas)", "def algo_selection(algos: tuple):\n print_header()\n print_list_algos(algos)\n print(\"Your choice: \", end='')\n return get_num_algo(algos)", "def tunnel2_phase2_integrity_algorithms(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase2_integrity_algorithms\")", "def tunnel2_phase2_integrity_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase2_integrity_algorithms\")", "def getAI(self):\n device = self.reducetoolbar.detectorcombobox.currentText()\n ai = self.calibrationsettings.AI(device)\n return ai", "def tools(self):\n\n return self._available_tools", "def get_tools_using_technique():\n global tools_using_technique\n\n if not tools_using_technique:\n tools_using_technique = rsh.tools_using_technique(get_srcs())\n \n return tools_using_technique", "def tunnel2_phase2_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase2_encryption_algorithms\")", "def tunnel2_phase2_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase2_encryption_algorithms\")", "def get_heuristics():\n from ixle import heuristics\n def is_heuristic(obj):\n FORBID = 'Heuristic SuggestiveHeuristic'.split() # sigh make metaclass\n return getattr(obj,'__name__', None) not in FORBID and \\\n getattr(obj, 'is_heuristic', False)\n return _harvest(heuristics, test=is_heuristic)", "def get_algorithm_dict(self, param_dict, algorithm):\n \n output_dict = None\n \n for exp_field in param_dict.keys():\n \n if param_dict[exp_field]['algorithm'] == algorithm:\n \n outpu_dict = param_dict[exp_field]['algorithm_dict']\n \n return output_dict", "def getConfiguredBackends():\n\ttry:\n\t\tfrom OPSI.Backend.BackendManager import BackendDispatcher\n\texcept ImportError as impError:\n\t\tlogger.debug(\"Import failed: {}\", impError)\n\t\treturn None\n\n\ttry:\n\t\tdispatcher = BackendDispatcher(\n\t\t\tdispatchConfigFile='/etc/opsi/backendManager/dispatch.conf',\n\t\t\tbackendconfigdir='/etc/opsi/backends/',\n\t\t)\n\texcept BackendConfigurationError as bcerror:\n\t\tlogger.debug(\"Unable to read backends: {}\", bcerror)\n\t\treturn None\n\n\tnames = [name.lower() for name in dispatcher.dispatcher_getBackendNames()]\n\tdispatcher.backend_exit()\n\n\treturn set(names)", "def host_key_algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key_algorithm\")", "def host_key_algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key_algorithm\")", "def host_key_algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key_algorithm\")", "def host_key_algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key_algorithm\")", "def algorithmInfo():\n\t\treturn r\"\"\"Fister Jr., Iztok and Fister, Dusan and Yang, Xin-She. \"A Hybrid Bat Algorithm\". Elektrotehniski vestnik, 2013. 
1-7.\"\"\"", "def airl():\n algorithm = \"airl\"", "def sem_type_set(cur_key, algorithm=\"IID_LINEAR\"):\n if algorithm in list(SEM_TYPE.keys()):\n if cur_key == \"sem_type\":\n res = SEM_TYPE[algorithm]\n else:\n res = []\n else:\n res = []\n return res", "def _get_ciphering_function(algorithm: Algorithm) -> Callable:\n ciphering_function = None\n if algorithm == Algorithm.caesar:\n ciphering_function = cifra.cipher.caesar.cipher\n elif algorithm == Algorithm.affine:\n ciphering_function = cifra.cipher.affine.cipher\n elif algorithm == Algorithm.substitution:\n ciphering_function = cifra.cipher.substitution.cipher\n elif algorithm == Algorithm.transposition:\n ciphering_function = cifra.cipher.transposition.cipher\n elif algorithm == Algorithm.vigenere:\n ciphering_function = cifra.cipher.vigenere.cipher\n return ciphering_function", "def print_all_separation_algorithms():\n from ..separation import all_separation_algorithms\n print('\\n'.join([a.__name__ for a in all_separation_algorithms]))", "def __init__(self, algorithms={}, strategies={}, pickleFile=None):\n if pickleFile is None:\n self.algds = algorithms\n self.stratds = strategies\n self._bestalg = None\n self._unifpf = None\n else:\n if pickleFile.find('.gz') < 0:\n pickleFile += '.gz'\n with gzip.open(pickleFile) as f:\n entry = pickle.load(f)\n self.algds = entry.algds\n self.stratds = entry.stratds\n self._bestalg = entry._bestalg\n self._unifpf = entry._unifpf", "def tunnel1_phase2_integrity_algorithms(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tunnel1_phase2_integrity_algorithms\")" ]
[ "0.72617215", "0.69703907", "0.6834078", "0.649296", "0.64006877", "0.6347613", "0.62988967", "0.62988967", "0.62693024", "0.62255967", "0.6208306", "0.6183921", "0.6171044", "0.6160205", "0.6155423", "0.6155423", "0.60712767", "0.60712767", "0.6055243", "0.6055243", "0.60232526", "0.5974244", "0.5836437", "0.58189136", "0.58051974", "0.5659022", "0.5638491", "0.562957", "0.5592733", "0.55262816", "0.54983765", "0.53562284", "0.5353418", "0.5350351", "0.53352165", "0.5333539", "0.53280723", "0.5315253", "0.5311341", "0.531078", "0.52925795", "0.5275126", "0.52550995", "0.518019", "0.51766044", "0.5171087", "0.51675767", "0.5159115", "0.5143235", "0.51308626", "0.5119318", "0.511558", "0.5102434", "0.50982505", "0.50868696", "0.50868696", "0.5076765", "0.5076765", "0.5075775", "0.50730336", "0.5071572", "0.50661457", "0.50659937", "0.50535446", "0.50535446", "0.5036214", "0.50340635", "0.50340635", "0.5029134", "0.5029134", "0.502871", "0.5027479", "0.5027479", "0.5025297", "0.5022249", "0.5022249", "0.5010877", "0.50097394", "0.5009546", "0.50015825", "0.50015825", "0.49880388", "0.49820703", "0.497669", "0.49681896", "0.49681896", "0.49549267", "0.4947465", "0.49470714", "0.4942663", "0.4942663", "0.4942663", "0.4942663", "0.49328145", "0.4932512", "0.49261957", "0.49235392", "0.49226758", "0.4921147", "0.49138013" ]
0.79699975
0
Gets the current train_ensemble
def _get_train_ensemble(self):
        self._validate_train_ensemble()
        return deepcopy(self.train_ensemble)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensemble(self):\n return self._ensemble", "def getTrainSet(self):\r\n return self.fTrainData", "def training_set(self):\n return self._training_set", "def getTrainInstance(self): #NOTE: Probably faster way of doing this than additional 'if' statement every learning iteration\r\n return [self.currentTrainState, self.currentTrainPhenotype] #Return unadulterated training data\r", "def get_classifier(self):\n return self.model", "def ensemble_perts(self):\n #emean = self.ensemble_mean()\n return self - self.ensemble_mean()\n #return self.state.values", "def get_hr_ensemble(self, ensemble_id: str):\n return self.hr_ensemble.get(ensemble_id, None)", "def train_data(self):\n return self._train_data", "def ensemble_mean(self):\n self.cube = self.cube_ensemble_mean(self.cube)\n self.processes.append('ensemble_mean')\n return self.cube", "def get_train_examples(self):\n raise NotImplementedError()", "def get_estimator_state(self):\n return self.estimator.state_dict()", "def get_estimator_state(self):\n return self.estimator.state_dict()", "def get_ensemble_model():\n ss = StandardScaler()\n xgb_clf = xgb.XGBClassifier(objective=\"binary:logistic\", random_state=42)\n\n xgb_model = Pipeline(steps=(['scale', ss], ['clf', xgb_clf]))\n\n xgb_model_params = {\n \"clf__colsample_bytree\": uniform(0.5, 0.5), # default 1\n \"clf__gamma\": loguniform(1e-1, 1e3), # default 0\n \"clf__learning_rate\": uniform(0.03, 0.57), # default 0.3\n \"clf__max_depth\": randint(2, 5), # default 3\n \"clf__n_estimators\": randint(10, 50), # default 100\n \"clf__subsample\": uniform(0.5, 0.25), # default 1\n \"clf__min_child_weight\": randint(1, 8) # default 1\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV (more iters to account for large space)\n ensemble_model = RandomizedSearchCV(xgb_model, xgb_model_params, n_iter=250, cv=3)\n\n return clone(ensemble_model)", "def train(self):\n return", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def train(self):\n return self.with_transforms(\"train\")", "def ensemble():\n return {\n \"type\": \"class\",\n \"base\": \"activity.activity\",\n \"is_abstract\": False,\n \"properties\": [\n (\n \"common_conformances\",\n \"linked_to(activity.conformance)\",\n \"0.N\",\n \"Conformance documents for requirements common across \"\n \"ensemble.\",\n ),\n (\n \"representative_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Representative model performance across ensemble.\",\n ),\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.N\",\n \"Links to web-pages and other ensemble specific documentation \"\n \"(including workflow descriptions).\",\n ),\n (\n \"ensemble_axes\",\n \"linked_to(activity.ensemble_axis)\",\n \"0.N\",\n \"Set of axes for the ensemble.\",\n ),\n (\n \"uber_ensembles\",\n \"linked_to(activity.uber_ensemble)\",\n \"0.N\",\n \"Link to one or more over-arching ensembles that might \"\n \"includes this one.\",\n ),\n (\n \"experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"Experiments with which the ensemble is 
associated (may \"\n \"differ from constituent simulations).\",\n ),\n (\n \"members\",\n \"linked_to(activity.simulation)\",\n \"0.N\",\n \"Simulations within ensemble (should only be zero while \"\n \"ensemble is being defined)\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n (\"cardinality\", \"canonical_name\", \"0.0\"),\n (\"cardinality\", \"keywords\", \"0.0\"),\n (\"cardinality\", \"duration\", \"0.0\"),\n ],\n }", "def getTrainingData(self):\n raise NotImplementedError", "def train_op_a(self):\r\n return self._train_op_a", "def ensemble_mean(self):\n new_cubelist = []\n for cube in self.cubelist:\n new_cubelist.append(self.cube_ensemble_mean(cube))\n self.cubelist = iris.cube.CubeList(new_cubelist)\n self.processes.append('ensemble_mean')\n return self.cubelist", "def get_trainer(self):\n return AutoEncoderTrainer", "def uber_ensemble():\n return {\n \"type\": \"class\",\n \"base\": \"activity.ensemble\",\n \"is_abstract\": False,\n \"properties\": [\n (\n \"child_ensembles\",\n \"linked_to(activity.ensemble)\",\n \"1.N\",\n \"Ensemble which are aggregated into this one.\",\n )\n ],\n \"constraints\": [\n (\"cardinality\", \"ensemble_axes\", \"1.N\"),\n (\"cardinality\", \"common_conformances\", \"0.0\"),\n (\"cardinality\", \"members\", \"0.0\"),\n ],\n }", "def ensemble_mean(self):\n return self.mean(dim='mem')", "def getTrainInstance(self):\n self.train_inst_condition = self.format_data.trainFormatted[self.data_ref][0]\n self.train_inst_phenotype = self.format_data.trainFormatted[self.data_ref][1]\n if self.data_ref < (self.format_data.numTrainphenotypes - 1):\n self.data_ref += 1\n else: # Once learning has completed an epoch (i.e. a cycle of iterations though the entire training dataset) it starts back at the first instance in the data)\n self.data_ref = 0\n return [self.train_inst_condition, self.train_inst_phenotype]", "def training_info(self):\n pass", "def get_ensemble_id(model):\n if 'object' in model and 'ensemble_id' in model['object']:\n return \"ensemble/%s\" % model['object']['ensemble_id']", "def get_data_train(self):\n return self.get_data(self.file_train, self.batch)", "def train(self) -> tf.contrib.data.Dataset:\n return self.__train_dataset", "def get_ensemble_model(params):\n ss = StandardScaler()\n xgb_reg = xgb.XGBRegressor(objective=\"reg:squarederror\",n_jobs=1, base_score=12, learning_rate=0.05, random_state=42)\n\n if params['pca']:\n pca = PCA(n_components=params['pca_comps'], whiten=True)\n xgb_model = Pipeline(steps=(['scale', ss], ['pca', pca], ['model', xgb_reg])) # pipeline\n else:\n xgb_model = Pipeline(steps=(['scale', ss], ['model', xgb_reg]))\n\n xgb_model_params = {\n \"model__n_estimators\": [100,250,500],\n \"model__colsample_bytree\": uniform(0.5, 0.5), # default 1\n \"model__min_child_weight\": randint(1,6), #deafult 1\n \"model__max_depth\": randint(2, 5), # default 3, 3-10 -\n \"model__subsample\": uniform(0.5, 0.5), # default 1\n \"model__reg_lambda\": loguniform(1e1,1e2) # l2 reg, default 1\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV (more iters to account for large space)\n ensemble_model = RandomizedSearchCV(xgb_model, xgb_model_params, n_iter=500, cv=5, verbose=1, n_jobs=5)\n\n return clone(ensemble_model)", "def train_op_b(self):\r\n return self._train_op_b", "def trainer(self):\n return self._trainer", "def get_train(self, preprocess=False):\n return self._dataset(self._directory, 'images_background_small1', preprocess)", "def get_train(self, data_file):\n 
return self.read_data(data_file)", "def train(self):\n raise NotImplementedError", "def get_train(self, data_file):\r\n return self.read_data(data_file)", "def train_labels(self):\n return self._train_labels", "def experiment(self):\n return self._experiment", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train_edges(self):\n return self._train_edges", "def train_ensemble(table, tree_prior, config):\n tasks = []\n for sub_seed in range(config['model_ensemble_size']):\n sub_config = config.copy()\n sub_config['seed'] += sub_seed\n tasks.append((table, tree_prior, sub_config))\n return parallel_map(_train_model, tasks)", "def train(self):\n raise NotImplementedError()", "def ensemble_entropy(self):\n return transition_matrices.entropy_measure(self.ensemble_transition_matrix)", "def train(self):\n\t\traise NotImplementedError", "def _get_classifiers(self):\n return self.__classifiers", "def _get_classifiers(self):\n return self.__classifiers", "def _get_classifiers(self):\n return self.__classifiers", "def get_active(cls) -> FeatureSet:\n if cls._active_feature_set is None:\n raise RuntimeError('No FeatureSet being actively defined.')\n return cls._active_feature_set", "def _get_current_hyperparameters(self):", "def ensemble_trace(self):\n return np.trace(self.ensemble_transition_matrix)", "def train_data(self):\n\n return self.__train_data, self.__train_labels", "def train_linear_ensemble(x, y, alpha, max_iter, n_ensembles):\n # x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n x_train, y_train = x, y\n ensemble_models = []\n for i in range(n_ensembles):\n samples = sample_without_replacement(n_population=x_train.shape[0], n_samples=(x_train.shape[0]/5))\n x_seg_train = pd.DataFrame()\n y_seg_train = pd.Series()\n for sample in samples:\n x_seg_train = pd.concat([x_seg_train, x_train.iloc[[sample]]])\n y_seg_train = pd.concat([y_seg_train, y_train.iloc[[sample]]])\n\n model: Ridge = Ridge(alpha=alpha, normalize=True, max_iter=max_iter).fit(x_seg_train, y_seg_train)\n print(model.score(x_seg_train, y_seg_train))\n # print(model.score(x_test, y_test))\n ensemble_models.append(model)\n\n return ensemble_models", "def classifier(self):\n \n if self._classifier is None:\n self._classifier = self.build_classifier(name='classify')\n \n if self.classifier_weights is not None:\n self._classifier.load_weights(self.classifier_weights)\n \n return self._classifier", "def ensemble(final_X_test, final_X_train, Y_train, Yl, index_train):\n ensemble_models = [DecisionTreeClassifier(),\n LinearSVC(),\n GaussianNB(),\n LogisticRegression(solver='lbfgs', multi_class='auto'),\n SVC(kernel=\"linear\", C=0.025)]\n n_folds = len(ensemble_models)\n kf = KFold(n_folds, shuffle=True)\n X_lv2 = np.zeros((final_X_train.shape[0], n_folds))\n y_lv2 = np.zeros(Y_train.shape)\n for itrain, itest in kf.split(final_X_train):\n y_lv2[itest] = Y_train[itest]\n # Train\n for n in range(n_folds):\n ensemble_models[n].fit(final_X_train[itrain, :], Y_train[itrain])\n X_lv2[itest, n] = ensemble_models[\n n].predict(final_X_train[itest, :])\n # Nivel 2\n Clas_lv2_m2 = SVC(kernel=\"linear\")\n Clas_lv2_m2.fit(X_lv2, y_lv2)\n # Train\n for n in range(n_folds):\n ensemble_models[n].fit(final_X_train, Y_train)\n # Predicción\n Ypred_test = np.zeros((Y_test.shape[0], n_folds))\n Ypred_excl = np.zeros((final_X_train.shape[0], n_folds))\n for n in range(n_folds):\n Ypred_test[:, n] = 
ensemble_models[n].predict(final_X_test)\n Ypred_excl[:, n] = ensemble_models[n].predict(final_X_train)\n yc2 = Clas_lv2_m2.predict(Ypred_excl)\n Yl_prediccion = np.zeros(Yl.shape[0])\n Yl_prediccion[index_train] = yc2\n plt.imshow(np.reshape(Yl_prediccion, (145, 145), order=\"F\")),\n plt.axis('off'),\n plt.title('Ensemble')\n plt.show()", "def use_ensemble_model(self):\n\t\t#get predictions\n\t\tpredictions_mnist = self.use_mnist_model()\n\t\tpredictions_print_img = self.use_model_trained_on_printed_images()\n\t\tpredictions_tesseract = self.use_pytesseract()\n\n\t\t# create empty ndarray\n\t\tnumbers = np.zeros(shape=(self.sudoku_size, self.sudoku_size))\n\n\t\t#find the most frequent\n\t\tfor i in range(self.sudoku_size):\n\t\t\tfor j in range(self.sudoku_size):\n\t\t\t\tpreds = [predictions_mnist[i][j], predictions_print_img[i][j],predictions_tesseract[i][j]]\n\t\t\t\tif len(set(preds)) == 3:\n\t\t\t\t\tnumbers[i][j] = preds[2]\n\t\t\t\telse:\n\t\t\t\t\toccurence_count = Counter(preds)\n\t\t\t\t\tnumbers[i][j] = occurence_count.most_common(1)[0][0]\n\n\t\treturn numbers", "def get_train_labels(self):\n raise NotImplementedError", "def train_prop(self):\n return self.__train_prop", "def get_predictors(self):\n\t\treturn self.predictors", "def get_train_iterator(self) -> tf.contrib.data.Iterator:\n return self.train.make_initializable_iterator()", "def get_assigner(self):\n return self.ewma_trainer.apply([self.mean, self.variance])", "def get_assigner(self):\n return self.ewma_trainer.apply([self.mean, self.variance])", "def get_regression_model(self):\n\n return self._lr", "def train_step(self):\n pass", "def train(self):\n model = TreeTrainer.train(self)\n model['latent'] = self._latent\n model['suffstats'] = {\n 'vert_ss': self._vert_ss,\n 'edge_ss': self._edge_ss,\n 'feat_ss': self._feat_ss,\n }\n return model", "def example(self):\n result = getattr(self, '_example', None)\n if result is None:\n # No example batch was found, so get one from the `.train` dataset\n result = next(iter(self.train))\n # And cache it for next time\n self._example = result\n return result", "def train(self) -> np.ndarray:\n if self.train_view is None:\n self.train_view = DataView(self._train, self.transform_hooks, self.cache)\n\n return self.train_view", "def load(self):\n checkpoints = glob(os.path.join(os.path.dirname(self.filepath), \"*/\"))\n if not checkpoints:\n return None\n\n latest = max(checkpoints, key=os.path.getctime)\n self.estimator = self.estimator.load(latest)\n logging.info(f\"Loaded estimator from {latest}.\")\n return self.estimator", "def feature(self):\n return self._feature", "def feature(self):\n return self._feature", "def _get_workload(self):\n return self._workload", "def predict_ensemble(ensemble, X):\n probs = [r.predict_proba(X)[:, 1] for r in ensemble]\n return np.vstack(probs).mean(axis=0)", "def get_workload(self):\n return self.workload", "def get_features(self):\n return self._features", "def get_ensemble_info(ensemble_id='Ens218'):\n\tensemble_id=ensemble_id.lower().replace('ens','') # ensemble_id should be an integer\n\tif ensemble_id:\n\t\tquery = \"SELECT * FROM ensembles WHERE ensemble_id='%s'\"%ensemble_id\n\t\t# result = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM ensembles WHERE ensemble_id='%s'\", (ensemble_id,)).fetchone()\n\t\t# result = db.get_engine(current_app, 'methylation_data').execute(query).fetchone()\n\t\t# if result is None:\n\t\t# \tresult = db.get_engine(current_app, 
'snATAC_data').execute(query).fetchone()\n\telse:\n\t\tensemble_id = int(filter(str.isdigit, ensemble_id))\n\t\tquery = \"SELECT * FROM ensembles WHERE ensemble_id='%s'\"%ensemble_id\n\n\n\tresult = db.get_engine(current_app, 'methylation_data').execute(query).fetchone()\n\tif result is None:\n\t\tresult = db.get_engine(current_app, 'snATAC_data').execute(query).fetchone()\n\n\t# Hard coded failsafe -- if the ensemble fails to load, default to MOp\n\tif result is None:\n\t\tensemble_id = 'Ens218';\n\t\tresult=get_ensemble_info(ensemble_id);\n\n\treturn result", "def training_frame(self):\n return self._parms.get(\"training_frame\")", "def ticker(self):\n return self.__train_prop[\"ticker\"]", "def train_frac(self):\n return self._train_frac", "def train(self, ):\n raise NotImplementedError", "def is_training(self):\n return self.mode == \"train\"", "def is_training(self):\n return self.mode == \"train\"", "def get_ensemble_merged_data(self) -> Dict[str, np.ndarray]:\n\n data = {k: v.copy() for k, v in self.data.items()} # deep copy\n\n if self.ensemble_results.empty(): # no ensemble data available\n return data\n\n train_scores, test_scores = self.ensemble_results.train_scores, self.ensemble_results.test_scores\n end_times = self.ensemble_results.end_times\n cur, timestep_size, sign = 0, self.cum_times.size, self.metric._sign\n key_train, key_test = f'ensemble::train::{self.metric.name}', f'ensemble::test::{self.metric.name}'\n\n all_test_perfs_null = all([perf is None for perf in test_scores])\n\n train_perfs = np.full_like(self.cum_times, self.metric._worst_possible_result)\n test_perfs = np.full_like(self.cum_times, self.metric._worst_possible_result)\n\n for timestamp, train_score, test_score in zip(end_times, train_scores, test_scores):\n avail_time = timestamp - self.start_time\n while cur < timestep_size and self.cum_times[cur] < avail_time:\n # Guarantee that cum_times[cur] >= avail_time\n cur += 1\n\n # results[cur] is the closest available checkpoint after or at the avail_time\n # ==> Assign this data to that checkpoint\n time_index = min(cur, timestep_size - 1)\n # If there already exists a previous allocated value, update by a better value\n train_perfs[time_index] = sign * max(sign * train_perfs[time_index], sign * train_score)\n # test_perfs can be none when X_test is not passed\n if not all_test_perfs_null:\n test_perfs[time_index] = sign * max(sign * test_perfs[time_index], sign * test_score)\n\n update_dict = {key_train: train_perfs}\n if not all_test_perfs_null:\n update_dict[key_test] = test_perfs\n\n data.update(update_dict)\n\n return data", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def get_lr(self) -> Tensor:\n\n return self.lr_scheduler.get_lr()", "def get_data(self):\n return self.train_edges, self.train_labels, self.test_edges, self.test_labels", "def get_beam_current(self):\n raise NotImplementedError", "def train(self) -> Any:\n pass", "def getResults(self):\n return self.classifiers", "def get_regression(self):\n return self.regression" ]
[ "0.82772297", "0.6414114", "0.6375453", "0.63674873", "0.62640953", "0.6179646", "0.6133189", "0.6104231", "0.6051133", "0.59600097", "0.5959593", "0.5959593", "0.59380734", "0.59326303", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.57831234", "0.577973", "0.57277197", "0.57136345", "0.5684906", "0.5673595", "0.56684524", "0.5659611", "0.5637403", "0.56040424", "0.5603752", "0.54626364", "0.54570866", "0.54452556", "0.54437566", "0.54436696", "0.5424734", "0.5423881", "0.54213524", "0.54115915", "0.54036754", "0.54017204", "0.5387735", "0.5387735", "0.5387735", "0.5387735", "0.5387735", "0.53807247", "0.53778", "0.5368329", "0.53600615", "0.53300774", "0.53251153", "0.53251153", "0.53251153", "0.5306397", "0.53022546", "0.5299284", "0.5298532", "0.52824014", "0.5279583", "0.5273186", "0.52723646", "0.52695787", "0.5268306", "0.5267859", "0.5257684", "0.5241096", "0.5241096", "0.52387846", "0.5236899", "0.5230928", "0.5230652", "0.52170277", "0.5212277", "0.52095944", "0.52095944", "0.520778", "0.5204798", "0.52035743", "0.5173917", "0.5169635", "0.51692396", "0.51572967", "0.5155954", "0.5135983", "0.5135809", "0.5135809", "0.5133128", "0.51285076", "0.51285076", "0.5123156", "0.5120554", "0.51197666", "0.51176625", "0.51156354", "0.511408" ]
0.84837633
0
Gets the current stack_models
def _get_stack_models(self):
        self._validate_stack_models()
        if self.stack_models == "auto":
            val = self._get_validation_strategy()
            if val.get("validation_type", "") == "custom":
                return False
            return True if self.mode in ["Compete", "Optuna"] else False
        else:
            return deepcopy(self.stack_models)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def get_models(self):\n self.load()\n return self._models", "def get_models(self):\n return self.P, self.Q", "def models(self):\n return self.config.models()", "def models(self):\n return self._base.classes", "def availablemodels(self):\n return self.__models.keys()", "def getModel(self):\n return _libsbml.SBase_getModel(self)", "def get_model(self):\n return self.chain.model", "def getModel(self):\n return self.model", "def get_stack(self):\n return self.__stack", "def model(self):\n return self.model_", "def _getModel(self):\r\n \r\n return self._model", "def model(self):\n return MODELS.get(self._model,self._model)", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get():\n\n return {'model_ids': mgmt.get_model_ids()}", "def iter_models(self):\n return iter(self.model_list)", "def models(self) -> list[AbstractModel]:\n return self._models", "def model(self):\n return self.__model", "def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models", "def get_model(self):\n return self._model", "def get_model(self):\n return self._model", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def get_model(self):\n return self.fragment.chain.model", "def models(self) -> t.List[Model]:\n _models: t.List[Model] = [\n item for item in self._deployables if isinstance(item, Model)\n ]\n return _models", "def getModel(self):\n return self._l[1]", "def get_model(self):\n\t\treturn self.object.__class__", "def Model(self):\n return self._model", "def backModel(self):\n return getattr(self, '_backModel', None)", "def modelstorage(self):\n return self._modelstorage", "def pending_models(self):\n return self._pending_models", "def read_all_stack(self):\n return self.STACK", "def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]", "def models(self):\n models = []\n for bundle in self.bundles.values():\n models.extend(list(bundle.models.values()))\n\n return models", "def stack(self):\n return self.history", "def getCurrentModel(self) -> Optional[Type[Fit]]:\n current_module = self.fitting_modules[self.module_combo.currentText()]\n model_selected = self.model_list.currentItem()\n if model_selected is None:\n return None\n model = getattr(current_module, model_selected.text())\n return model", "def get_main_model(self):\n return self", "def get_stacks_log(self):\n return self._get_log('stacks')", "def app(self):\n return self._model", "def get_related_model(self):\n\t\treturn self.related_model", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def 
model(self) -> Model:\n return self.software_system.get_model()", "def get_main_model(self):\n return self._main_model", "def peak_all_models(self) -> List:\n models = list(self.meta.name)\n print(models)\n return models", "def stack(self) -> Optional[pulumi.Input['StackPropertiesArgs']]:\n return pulumi.get(self, \"stack\")", "def getModel(self):\n return _libsbml.L3ParserSettings_getModel(self)", "def _get_card_model(self, model: str) -> Any:\n return self.collection.models.byName(model)", "def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)", "def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def get_stack_info():\n\n response = cloudformation.describe_stacks(\n StackName=config.CLOUDFORMATION_STACK_NAME\n )\n return response['Stacks'][0]", "def get_parent_model(self):\n return self._model", "def acquire_model(self):\n return self._model", "def backbone(self) -> ModelBackbone:\n return self.head.backbone", "def poolstack ( self ):\n return self._poolstack", "def get_modelDict(self):\n return self.__modelDict", "def get_model(self):\n return self.beta_sheet.model", "def get_related_models(self):\n\t\tmodels = []\n\t\tif not self.related_models:\n\t\t\treturn models\n\n\t\tfor model in self.related_overrides.get(self.related_override_key(), self.related_models):\n\t\t\ttry:\n\t\t\t\tgroup, model_path, extra_fields = model\n\t\t\texcept ValueError:\n\t\t\t\tgroup, model_path = model\n\t\t\t\textra_fields = ()\n\t\t\tapp_label, model_name = model_path.split('.')\n\t\t\tmodels.append((group, apps.get_model(app_label, model_name,), extra_fields, group.replace('_', ' ')))\n\n\t\treturn models", "def get_seo_models():\n seo_models = []\n for model_name in getattr(settings, setting_name_seo_models, ()):\n if \".\" in model_name:\n # TODO: Test this block\n app_label, model_name = model_name.split(\".\", 1)\n model = models.get_model(app_label, model_name)\n if model:\n seo_models.append(model)\n else:\n app = models.get_app(model_name)\n if app:\n seo_models.extend(models.get_models(app))\n\n return seo_models", "def getModelRef(self):\n return _libsbml.Submodel_getModelRef(self)", "def get_related_models(self, model):\n return self._invalidation_model_store.get(model, {})", "def xmodel ( self ) :\n return self.__xmodel", "def model(self) -> 'outputs.ModelDefinitionResponse':\n return pulumi.get(self, \"model\")", "def get_models(self):\n return [Doc(system_object) for system_object in self._get_documents()]", "def model(self) -> Model:\n return self._model", "def models():\n return list(alg2module.keys())", "def get_top_model(self):\n model = ModelHelper.load_model(filename_weight=self.top_model_path + '.h5',\n filename_model=self.top_model_path + '.json')\n\n return model", "def get_model(self):\n # just return the first model, since all replicas are the same\n return self.call_async(0, '_async_get_model').gen()", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def model_name(self):\n return 
self.get_queryset().model._meta.model_name", "def get_model(self):\n return self.model.module if isinstance(self.model, DDP) else self.model", "def getModelHistory(self, *args):\n return _libsbml.SBase_getModelHistory(self, *args)", "def current(cls):\n return stackless.getcurrent()", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def getModules(self):\n\n modules = cmds.ls(type=\"network\")\n for module in modules:\n attrs = cmds.listAttr(module)\n if \"rigModules\" in attrs:\n return module", "def stack(context=1):\r\n return getouterframes(sys._getframe(1), context)", "def model(self) -> PipelineModel:\n return self._model", "def get_live_tracked_models(self, model_class):\n return self.update_models[model_class] + self.create_models[model_class]", "def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models", "def get_latest_model():\n return get_models()[-1]", "def get(self, *args):\n return _libsbml.ListOfSubmodels_get(self, *args)", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models", "def getListOfSubmodels(self):\n return _libsbml.CompModelPlugin_getListOfSubmodels(self)", "def model(self) -> Optional[str]:\n return pulumi.get(self, \"model\")", "def list_spss_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='SPSS Predictive Model', fields=fields)\n\t\treturn models" ]
[ "0.6803391", "0.6803391", "0.64394", "0.6406779", "0.6275852", "0.6213419", "0.6203647", "0.6106908", "0.60893244", "0.6065379", "0.5988689", "0.5973042", "0.5936472", "0.5927681", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5820495", "0.5818902", "0.578552", "0.5754407", "0.57457685", "0.57137257", "0.57137257", "0.5710266", "0.57046366", "0.57046366", "0.57046366", "0.57046366", "0.57046366", "0.5684303", "0.5681663", "0.56604177", "0.5625896", "0.56206745", "0.55762494", "0.55454385", "0.5543143", "0.55366826", "0.55163544", "0.5515815", "0.550764", "0.55037254", "0.54973", "0.5479842", "0.54657906", "0.54599535", "0.5448944", "0.54475075", "0.5445671", "0.54315984", "0.5425567", "0.5418631", "0.53867024", "0.53733397", "0.5370795", "0.5361206", "0.53547454", "0.5335455", "0.53290945", "0.53208035", "0.5320543", "0.53027505", "0.52678406", "0.52676517", "0.5263669", "0.52378285", "0.52312475", "0.52283686", "0.52041286", "0.5203916", "0.51956177", "0.5194266", "0.51865226", "0.5184275", "0.51811785", "0.5176702", "0.5129169", "0.51272774", "0.512027", "0.5116641", "0.5111536", "0.5098617", "0.5097574", "0.5095797", "0.5089228", "0.5083845", "0.5078989", "0.50674134", "0.5064529", "0.50622225", "0.5059081", "0.50584084" ]
0.72028995
0
Gets the current eval_metric
def _get_eval_metric(self): self._validate_eval_metric() if isinstance(self.eval_metric, types.FunctionType): UserDefinedEvalMetric().set_metric(self.eval_metric) return "user_defined_metric" if self.eval_metric == "auto": if self._get_ml_task() == BINARY_CLASSIFICATION: return "logloss" elif self._get_ml_task() == MULTICLASS_CLASSIFICATION: return "logloss" elif self._get_ml_task() == REGRESSION: return "rmse" else: return deepcopy(self.eval_metric)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_evaluation_metric(self):\n\t\treturn self.metric", "def _get_eval_metric(self):\n raise NotImplementedError", "def metric(self):\n return self.__metric", "def metric(self):\n return self._metric", "def metric(self):\n\n if not self._metric_cache:\n # Select an appropriate statistic\n cls = utils.import_class_or_module(self._metric)\n self._metric_cache = cls(self.additional)\n\n return self._metric_cache", "def metric(self) -> str:\r\n return self._metric", "def get_metrics(self):\n return None", "def evaluation(self):\n return self._evaluation", "def getMetricValue(self):\n return self.getOrDefault(self.metricParams)", "def get_metric(self, metric_name):\n return getattr(self, metric_name, None)", "def get_metric(self, reset: bool = False):\n top_k = self._predictions.topk(self._k)[0][:,self._k-1]\n predictions = torch.ge(self._predictions,top_k.unsqueeze(1).expand(self._batch_size,self._gold_labels.size(1))).float()\n gold_labels = self._gold_labels.float()\n self._precision_at_k += ((gold_labels * predictions).sum(1) / self._k).sum()\n\n precision_at_k = self._precision_at_k / self._ttl_size\n \n if reset:\n self.reset()\n return precision_at_k.cpu().item()", "def evaluator(self):\n return self.__evaluator", "def evaluation( self ) :\n\n return( self.__evaluation )", "def _eval_graph(self, context, sampled_rate=None, cached_id=0):\n results, names = context.run_eval_graph(sampled_rate, cached_id)\n metric = np.mean(results[list(names).index(self.metric_name)])\n return metric", "def get_metrics(self) -> dict:\n return self.metric_dict", "def _get_tunnel_metric(self):\n return self.__tunnel_metric", "def get_current_measurement(self):\n idx = self.measurementsListWidget.currentRow()\n key = list(self.mgr.obj.measurements)[idx]\n return self.mgr.obj.measurements[key]", "def _get_static_metric(self):\n return self.__static_metric", "def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics", "def getMetricName(self):\n return self.getOrDefault(self.metricName)", "def getMetricName(self):\n return self.getOrDefault(self.metricName)", "def metrics(self):\n return self.__metrics", "def get_evaluation_metric(config):\n assert 'eval_metric' in config, 'Could not find evalvalutation metric configuration'\n eval_config = config['eval_metric']\n name = eval_config.pop('name')\n\n if name == 'BCEWithLogitsLoss':\n return nn.BCEWithLogitsLoss()\n elif name == 'CrossEntropyLoss':\n return nn.CrossEntropyLoss()\n elif name == 'MSELoss':\n return nn.MSELoss()\n elif name == 'Accuracy':\n return Accuracy()\n else:\n raise RuntimeError(f\"Unsupported metric function: '{name}'. Supported losses: {SUPPORTED_METRICS}\")", "def get_metric(self) -> mt.Metric:\n return mt.BinaryAccuracy()", "def output_metric(self, key=None, metric='loss'):\n if key is None:\n key = self.key\n return self.metrics[key][metric][-1]", "def get_metrics(self, metric_name: str):\n if metric_name == \"rmse\":\n return self._rmse\n elif metric_name == \"mase\":\n return self._mase\n elif metric_name == \"mae\":\n return self._mae\n elif metric_name == \"mape\":\n return self._mape\n elif metric_name == \"f1\":\n return self._f1\n elif metric_name == \"accuracy\":\n return self._accuracy", "def get_metric(self):\n assert self._metric in self._metrics, 'Unsupported metric! 
Check the _metrics attribute for a list of supported metrics.'\n if self._metric == 'Euclidean':\n metric = torch.eye(self.parameters.shape[0])\n elif self._metric == 'Banana':\n n = self.dataset.shape[0]\n fish = torch.zeros(2,2)\n fish[0,0] = n/self.prior_var + 1\n fish[0,1] = n*2*self.parameters[1]/self.prior_var\n fish[1,0] = n*2*self.parameters[1]/self.prior_var\n fish[1,1] = n*4*self.parameters[1]**2/self.prior_var + 1\n metric = fish\n elif self._metric == 'Hessian':\n metric = self.get_hessian()\n elif self._metric == 'Softabs':\n hessian = self.get_hessian()\n if self._potential == 'funnel':\n hessian += torch.diag(self.jitters)\n eigs, vects = hessian.symeig(eigenvectors = True)\n softabs = (1./torch.tanh(self.softabs * eigs)) * eigs\n metric = vects @ softabs.diag() @ vects.t()\n elif self._metric == 'Fisher':\n metric = torch.zeros(self.parameters.shape[0],self.parameters.shape[0])\n grads = torch.zeros(self.parameters.shape[0])\n grads[0] = 0.5*torch.sum(self.parameters[1:]**2)*torch.exp(self.parameters[0]) + self.parameters[0]/9.\n grads[1:] = self.parameters[1:]*torch.exp(self.parameters[0])\n metric = torch.ger(grads,grads) + torch.eye(self.parameters.shape[0])/self.softabs\n return metric", "def _get_next_hop_metric(self):\n return self.__next_hop_metric", "def metric_name(self) -> str:\n return self._values.get('metric_name')", "def metric_name(self) -> str:\n return self._values.get('metric_name')", "def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> str:\n return self._metric_name", "def get_model_evaluation(self):\n\n self.log(f\"{self.cur_file_path}\\t\\tInfo: model_evaluation method invoked for {self.model.__class__.__name__}!\")\n\n evaluation = ModelEvaluation(self.model, (self.trainX, self.trainY), (self.testX, self.testY))\n return evaluation.get_evaluation_report()", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def create_metric(self) -> EvalMetric:\n pass", "def best_metric(self) -> float:\n return self._best_metric", "def metric_value(self) -> typing.Optional[str]:\n return self._values.get('metric_value')", "def metric_value(self) -> typing.Optional[str]:\n return self._values.get('metric_value')", "def get_evaluation_metric(config, logger, device):\r\n metrics = config['eval']['metrics']\r\n if not isinstance(metrics, list):\r\n metrics = [metrics]\r\n curves = config['eval'].get('curves', list())\r\n threshold = config['eval'].get('probability_threshold', 0.5)\r\n num_classes = config['model']['num_classes']\r\n\r\n metrics_dict = {}\r\n curves_dict = {}\r\n cm = _ConfusionMatrix(num_classes=num_classes, threshold=threshold).to(device)\r\n cm_per_target = None\r\n cms = None\r\n cms_per_target = None\r\n if any([item.startswith('per_target_') for item in metrics]):\r\n cm_per_target = _ConfusionMatrix(num_classes=num_classes, threshold=threshold).to(device)\r\n if any(['auc' in item for item in metrics]) or any(['ap' in item for item in metrics]) \\\r\n or any(['pr' in item for item in curves]) or any(['pr' in item for item in curves]):\r\n thresholds_type = config['eval'].get('thresholds_type', 'logspace')\r\n if thresholds_type == 'logspace':\r\n thresholds = ((np.logspace(0, 1, config['eval']['num_thresholds'] + 2) - 1) / 9)[1: -1]\r\n elif thresholds_type == 'logspace_pro':\r\n thresholds = ((np.logspace(0, 1, 
config['eval']['num_thresholds'] + 2, base=100) - 1) / 99)[1: -1]\r\n elif thresholds_type == 'linspace':\r\n thresholds = np.linspace(0.0, 1.0, config['eval']['num_thresholds'] + 2)[1: -1]\r\n elif thresholds_type == 'uline':\r\n thresholds = (((np.logspace(0, 1, config['eval']['num_thresholds'] // 2 + 2,\r\n base=10000000000) - 1) / 9999999999)[1: -1]) / 2\r\n if config['eval']['num_thresholds'] % 2 == 1:\r\n thresholds = np.append(thresholds, 0.5)\r\n for i in range(config['eval']['num_thresholds'] // 2 - 1, -1, -1):\r\n thresholds = np.append(thresholds, 1.0 - thresholds[i])\r\n else:\r\n logger.critical('thresholds_type is not supported: %s' % thresholds_type)\r\n exit(1)\r\n cms = [_ConfusionMatrix(num_classes, t, True).to(device) for t in thresholds]\r\n cms_per_target = None\r\n if any([item.startswith('per_target_') for item in curves]) or any(\r\n item.startswith('per_target_') for item in metrics):\r\n cms_per_target = [_ConfusionMatrix(num_classes, t, True).to(device) for t in thresholds]\r\n update_flags = [True, True, True, True] # single, multiple, per_target_single, per_target_multiple\r\n for metric_name in metrics:\r\n if metric_name.startswith('per_target_'):\r\n callback_fn = per_target_transform\r\n update_flag_id = 2\r\n used_cm = cm_per_target\r\n used_cms = cms_per_target\r\n else:\r\n callback_fn = None\r\n update_flag_id = 0\r\n used_cm = cm\r\n used_cms = cms\r\n if metric_name.endswith('tp'):\r\n metrics_dict[metric_name] = TruePositive(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('fp'):\r\n metrics_dict[metric_name] = FalsePositive(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('fn'):\r\n metrics_dict[metric_name] = FalseNegative(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('tn'):\r\n metrics_dict[metric_name] = TrueNegative(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('precision'):\r\n metrics_dict[metric_name] = Precision(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('recall'):\r\n metrics_dict[metric_name] = Recall(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('sensitivity'):\r\n metrics_dict[metric_name] = Sensitivity(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('specificity'):\r\n metrics_dict[metric_name] = Specificity(used_cm, update=update_flags[update_flag_id],\r\n callback_fn=callback_fn).to(device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('dsc'):\r\n metrics_dict[metric_name] = DSC(used_cm, update=update_flags[update_flag_id], callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id] = False\r\n elif metric_name.endswith('auc'):\r\n metrics_dict[metric_name] = AUC(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id + 1] = False\r\n elif 
metric_name.endswith('ap'):\r\n metrics_dict[metric_name] = AP(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn).to(\r\n device)\r\n update_flags[update_flag_id + 1] = False\r\n elif metric_name.endswith('hd95'):\r\n metrics_dict[metric_name] = HD95(threshold).to(device)\r\n else:\r\n logger.error('Unrecognized metric: %s' % metric_name)\r\n continue\r\n for curve_name in curves:\r\n if curve_name.startswith('per_target_'):\r\n callback_fn = per_target_transform\r\n update_flag_id = 2\r\n used_cm = cm_per_target\r\n used_cms = cms_per_target\r\n else:\r\n callback_fn = None\r\n update_flag_id = 0\r\n used_cm = cm\r\n used_cms = cms\r\n if curve_name.endswith('roc'):\r\n if curve_name.replace('roc', 'auc') not in metrics_dict:\r\n logger.warning('%s not in metrics but %s in curves. Adding %s to metrics'\r\n % (curve_name.replace('roc', 'auc'), curve_name, curve_name.replace('roc', 'auc')))\r\n metrics_dict[curve_name.replace('roc', 'auc')] = AUC(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn)\r\n update_flags[update_flag_id + 1] = False\r\n curves_dict[curve_name] = metrics_dict[curve_name.replace('roc', 'auc')]\r\n elif curve_name.endswith('pr'):\r\n if curve_name.replace('pr', 'ap') not in metrics_dict:\r\n logger.warning('%s not in metrics but %s in curves. Adding %s to metrics'\r\n % (curve_name.replace('pr', 'ap'), curve_name, curve_name.replace('pr', 'ap')))\r\n metrics_dict[curve_name.replace('pr', 'ap')] = AP(used_cms, update=update_flags[update_flag_id + 1],\r\n callback_fn=callback_fn)\r\n update_flags[update_flag_id + 1] = False\r\n curves_dict[curve_name] = metrics_dict[curve_name.replace('pr', 'ap')]\r\n if len(metrics_dict) == 0:\r\n logger.critical('No metric is added')\r\n exit(1)\r\n return metrics_dict, curves_dict", "def metrics(self):\n metrics_registry = getattr(self._thread_local, \"klio_metrics\", None)\n if not metrics_registry:\n self._thread_local.klio_metrics = self._get_metrics_registry()\n return self._thread_local.klio_metrics", "def _get_lsp_config_metric(self):\n return self.__lsp_config_metric", "def default_metric_value(self) -> float:", "def get_current_rate(self):\n pass", "def metrics(self) -> pulumi.Output['outputs.RuntimeMetricsResponse']:\n return pulumi.get(self, \"metrics\")", "def compute_metrics(self):\n pass", "def transition_evaluation(self):\n return self.container['transition_evaluation']", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"metric_name\")", "def measure(self):\n return self._measure", "def eval_mode(self):\n return self._eval_mode", "def inspect_instance(self):\n url = \"http://%s:1337/metrics\" % self.host\n r = requests.get(url)\n try:\n s = r.json()\n except TypeError:\n s = r.text\n return s", "def metric_name(self) -> 
Optional[str]:\n return pulumi.get(self, \"metric_name\")", "def _get_metric(name):\n try:\n return metrics.metric(name)\n except InvalidMetricError:\n return None", "def getValue(self):\n # compute the values of my operands\n values = (op.getValue() for op in self.operands)\n # apply my operator\n return self.evaluator(*values)", "def get_metric(self, obj):\n if self.conn is None:\n return 0\n\n key = \"{}_metric\".format(obj)\n resp = self.conn.get(key)\n if resp is None:\n resp = 0\n else:\n resp = int(resp.decode('utf8'))\n return resp", "def get_metric(self, data_row: pd.Series) -> float:", "def default_metric():\n return ProductRiemannianMetric", "def metrics(self):\r\n return Metrics(self)", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()", "def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()", "def get_model_evaluation(client, model_name):\n model_evaluations = [e for e in client.list_model_evaluations(model_name)]\n model_evaluation = model_evaluations[0]\n print(\"Model evaluation:\")\n print(model_evaluation)\n return model_evaluation", "def get_current_value(self):\n assert(self.is_started())\n return self.currValue", "def getTelemetryValue(self) -> float:\n\t\treturn super().getTelemetryValue()", "def calculation(self):\n return self._calculation", "def get(self):\n if self.num_inst == 0:\n return (self.name, float('nan'))\n else:\n return (self.name, self.sum_metric / self.num_inst)", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def get_metric(self, reset: bool = False):\n average_value = self._total_value / self._count if self._count > 0 else 0\n if reset:\n self.reset()\n return average_value", "def metrics(self) -> dict:\n if not self.exp_metadata.metrics:\n self.exp_metadata.metrics = {}\n return self.exp_metadata.metrics", "def evaluation_status(self):\n return self._evaluation_status", "def get_metrics_class(self):\n return ClassificationMetricsValue", "def get_metric(name):\n return metric_name_to_function_mapping[name.lower()]", "def get_evaluations(self):\r\n return self.evaluations", "def get_metrics_class(self):\n return RegressionTestMetrics", "def state(self):\n return self._measure", "def metric_tests(self) -> Dict[str, FAIRResultEvaluationCriterium]:\n return self._metric_tests", "def test_get_derived_metric(self):\n pass", "def get_eval_result(self):\n return self.content_eval", "def evaluation_reducer(self) -> Union[Reducer, Dict[str, Reducer]]:\n return Reducer.AVG", "def get_regression(self):\n return self.regression", "def __evaluate_metric(dataset, y_act, y_pred):\n if dataset.metric == 'specific':\n if dataset.best_is_min:\n return return_specific_metrics(y_act, y_pred)\n else:\n return -return_specific_metrics(y_act, y_pred)\n else:\n return evaluate_metric(y_act, y_pred, dataset.metric, dataset.y_n_classes)", "def monitored(self):\n return self.monitor", "def metric_identifier(self) -> str:\n return self._metric_identifier", "def weighted_metrics(self):\n return None", "def _getMetrics(self):\n metric = None\n if self.metrics is not None:\n metric = self.metrics(self._currentRecordIndex+1)\n elif self.metricValue is not None:\n metric = self.metricValue\n else:\n raise RuntimeError('No metrics or metric value specified for dummy model')\n\n return {self._optimizeKeyPattern:metric}", "def getMeter(self):\n return self._Meter", "def evaluate(self):\n\n\t\tself.model_score = 
self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def get_measured_current(self):\n status = self.get_status_response()\n current = status[16] + (status[17] * 0x100) + (status[18] * 0x10000) + (status[19] * 0x1000000)\n current = float(current)\n current /= (1000.0 * 1000.0)\n return current\n #end get_measured_current", "def metrics(self):\r\n if not hasattr(self, '_observable_metrics'):\r\n self._observable_metrics = Metrics()\r\n return self._observable_metrics", "def getValue(self):\n key = int(self.keyExpression.getValue())\n if key in self.dictOfExpressions:\n return self.dictOfExpressions[key].getValue()\n\n return 0.0" ]
[ "0.8871039", "0.8542188", "0.78376704", "0.77682865", "0.7014578", "0.6998242", "0.6777106", "0.672844", "0.67167246", "0.6679996", "0.6656904", "0.6603165", "0.65871996", "0.6512036", "0.6506062", "0.64750284", "0.64335555", "0.6335517", "0.6312627", "0.6312022", "0.6312022", "0.63058716", "0.62943065", "0.6247099", "0.62466365", "0.62465966", "0.6234684", "0.62259537", "0.62224144", "0.62224144", "0.62084365", "0.62084365", "0.61970216", "0.6144839", "0.61414146", "0.61414146", "0.6132307", "0.6119188", "0.61115384", "0.61047375", "0.61047375", "0.6095164", "0.6089484", "0.6079749", "0.6078696", "0.6065134", "0.6019648", "0.6005359", "0.5987869", "0.59678453", "0.59678453", "0.59678453", "0.59678453", "0.59678453", "0.59678453", "0.59678453", "0.59678453", "0.59678453", "0.59358317", "0.58979815", "0.58977276", "0.5882705", "0.58822554", "0.58773386", "0.58515185", "0.58423704", "0.5838192", "0.5830627", "0.5815771", "0.5814973", "0.5814973", "0.58149475", "0.58116275", "0.5804004", "0.57982206", "0.5787048", "0.577919", "0.5775274", "0.5757957", "0.5748341", "0.5743953", "0.57134736", "0.5707909", "0.5694164", "0.56889915", "0.56845784", "0.56783366", "0.5674661", "0.5673011", "0.5672837", "0.5668901", "0.56523234", "0.5648336", "0.5646909", "0.56455743", "0.563283", "0.5627603", "0.56102127", "0.56023926", "0.5592876" ]
0.76053345
4
Gets the current validation_strategy
def _get_validation_strategy(self): strat = {} self._validate_validation_strategy() if self.validation_strategy == "auto": if self._get_mode() == "Explain": strat = { "validation_type": "split", "train_ratio": 0.75, "shuffle": True, "stratify": True, } elif self._get_mode() == "Perform": strat = { "validation_type": "kfold", "k_folds": 5, "shuffle": True, "stratify": True, } elif self._get_mode() in ["Compete", "Optuna"]: strat = { "validation_type": "kfold", "k_folds": 10, "shuffle": True, "stratify": True, } if self._get_ml_task() == REGRESSION: if "stratify" in strat: # it's better to always check # before delete (trust me) del strat["stratify"] return strat else: strat = deepcopy(self.validation_strategy) if self._get_ml_task() == REGRESSION: if "stratify" in strat: del strat["stratify"] return strat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_validate(self):\n return self.validate", "def validator(self):\n return self._validator", "def paramValidationPref(self):\n # If the level of the object is below the Preference level,\n # recursively call base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(PARAM_VALIDATION_PREF, self._param_validation_pref.level)[0]", "def get_validator(self):\n return self.get_validator_class()(**self.get_validator_kwargs())", "def get_validator_class(self):\n return self.validator_class", "def get_layout_validator(self):\n if self._layout_validator is None:\n self._compute_layout_validator()\n return self._layout_validator", "def get_validation_schema(self):\n return self.validation_schema", "def session_validator(self):\n return self.session.validator", "def _get_validator(self):\n validator_class = self.validator_class or AssertionValidator\n if validator_class:\n return validator_class()", "def validation_frame(self):\n return self._parms.get(\"validation_frame\")", "def validation_state(self) -> str:\n return pulumi.get(self, \"validation_state\")", "def validation_config(self) -> Optional[pulumi.Input['ValidationConfigArgs']]:\n return pulumi.get(self, \"validation_config\")", "def validation_path(self):\n return self._validation_path", "def validation_id(self):\n return self._validation_id", "def get_validator(cls):\n cls.validator.model = cls\n return cls.validator or SageValidator", "def get_validator_class(self):\n validator_class = self.oauth_validator_class\n if validator_class is not None:\n return validator_class\n return oauth_api_settings.DEFAULT_VALIDATOR_CLASS", "def validation_type(self) -> Optional[pulumi.Input[Union[str, 'ValidationType']]]:\n return pulumi.get(self, \"validation_type\")", "def validation_required(self):\n return self._validation_required", "def _get_validation_method(self):\n return CommentWfItem.process_comment", "def validation_type(self) -> Optional[str]:\n return pulumi.get(self, \"validation_type\")", "def strategy_config(self):\n return self._strategy_config", "def validator(self) -> Optional[Dict[str, Any]]:\n return self._validator", "def get_save_strategy(self):\r\n return self.save_strategy", "def validations(self):\n return self.container['validations']", "def validation_config(self) -> pulumi.Output['outputs.ValidationConfigResponse']:\n return pulumi.get(self, \"validation_config\")", "def strategy(self) -> Optional[pulumi.Input['UpgradeSettingsStrategy']]:\n return pulumi.get(self, \"strategy\")", "def get_rule(self):\n\n return self.__rule_", "def get_setting_validator(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('validator', None)", "def validations(self) -> Optional[Sequence['outputs.ValidationPatch']]:\n return pulumi.get(self, \"validations\")", "def authentication_strategy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"authentication_strategy\")", "def authentication_strategy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"authentication_strategy\")", "def validator(self) -> DataValidator:\n if self._validator is None:\n self._validator = JsonSchemaDataValidator(self.schemaview.schema)\n return self._validator", "def getValidator(self, *args):\n return _libsbml.SBMLDocument_getValidator(self, *args)", "def requested_validation(self):\n return self.caller.player_ob.db.requested_validation or []", "def validations(self) -> Sequence['outputs.Validation']:\n return pulumi.get(self, \"validations\")", "def 
get_strategy_name(self):\n return self._java_ref.getStrategyName()", "def validated_hook(self) -> Callable[[bool], None]:\n return self._validated_hook", "def get_rule(self):\n return self.rule.state_dict()", "def validation_time(self):\n return self._validation_time", "def getLatestValidValidation(self):\n validation = None\n lastfrom = None\n lastto = None\n for v in self.getValidations():\n validfrom = v.getDownFrom() if v else None\n validto = v.getDownTo() if validfrom else None\n if not validfrom or not validto:\n continue\n validfrom = validfrom.asdatetime().date()\n validto = validto.asdatetime().date()\n if not validation \\\n or validto > lastto \\\n or (validto == lastto and validfrom > lastfrom):\n validation = v\n lastfrom = validfrom\n lastto = validto\n return validation", "def validate(self):\n return self.__class__._validate(self)", "def load_balance_strategy(self):\n return self._load_balance_strategy", "def _get_boost_on_errors(self):\n self._validate_boost_on_errors()\n if self.boost_on_errors == \"auto\":\n val = self._get_validation_strategy()\n if val.get(\"validation_type\", \"\") == \"custom\":\n return False\n if self._get_mode() == \"Explain\":\n return False\n if self._get_mode() == \"Perform\":\n return False\n if self._get_mode() == \"Compete\":\n return True\n if self._get_mode() == \"Optuna\":\n return False\n else:\n return deepcopy(self.boost_on_errors)", "def validation(self) -> tf.contrib.data.Dataset:\n return self.__validation_dataset", "def password_validation_policy(self) -> 'outputs.PasswordValidationPolicyResponse':\n return pulumi.get(self, \"password_validation_policy\")", "def calculation_strategy(self) -> CalculationStrategy:\n if \"calculation_strategy\" in self._json_data:\n return CalculationStrategy(str(self._json_data.get(\"calculation_strategy\")))\n return CalculationStrategy.LUT", "def environment_selection_strategy(self):\n return self._environment_selection_strategy", "def get_validation_performance(self) -> float:\n return self.best_performance", "def valid_values_provider(self):\n\n if self.instance_valid_values is not None:\n return self.instance_valid_values\n\n if self.class_valid_values is not None:\n return self.class_valid_values\n\n return None", "def authentication_strategy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"authentication_strategy\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"rule\")", "def validation(self):\n validation_info = {}\n for _doc in self.schema_extension_only['@graph']:\n if \"$validation\" in _doc:\n data = _doc[\"$validation\"]\n if \"definitions\" in _doc[\"$validation\"]:\n data = expand_ref(data, _doc[\"$validation\"][\"definitions\"])\n validation_info[_doc[\"@id\"]] = data\n return validation_info", "def replacement_strategy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"replacement_strategy\")", "def get_number_of_validation(self):\n return self.n_valid", "def algorithm(self):\n return self._algorithm", "def __selectStrategy( self 
):\n chosenStrategy = self.activeStrategies[self.chosenStrategy]\n self.__incrementChosenStrategy()\n return chosenStrategy", "def is_validated(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_validated\")", "def is_validated(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_validated\")", "def is_validated(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_validated\")", "def get_algorithm(self):\n return self.alg", "def get_validation_iterator(self) -> tf.contrib.data.Iterator:\n return self.validation.make_initializable_iterator()", "def validation_method(self, validation_method):\n if (\n self.local_vars_configuration.client_side_validation\n and validation_method is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `validation_method`, must not be `None`\"\n ) # noqa: E501\n allowed_values = [\"0\", \"1\", \"2\"] # noqa: E501\n if (\n self.local_vars_configuration.client_side_validation\n and validation_method not in allowed_values\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `validation_method` ({0}), must be one of {1}\".format( # noqa: E501\n validation_method, allowed_values\n )\n )\n\n self._validation_method = validation_method", "def get_network_plugin_mode(self) -> Union[str, None]:\n return self._get_network_plugin_mode(enable_validation=True)", "def eval_mode(self):\n return self._eval_mode", "def linear_mode_config(self) -> ConfigType | None:\n if not self.is_strategy_supported(CalculationStrategy.LINEAR):\n raise UnsupportedStrategyError(\n f\"Strategy linear is not supported by model: {self._model}\",\n )\n return self._json_data.get(\"linear_config\")", "def type(self) -> pulumi.Input[Union[str, 'ValidationThresholdType']]:\n return pulumi.get(self, \"type\")", "def get_validator_kwargs(self):\n return {\n 'schema': self.get_validation_schema(),\n }", "def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)", "def get_rules(cls):\n raise NotImplementedError()", "def strategy_id(self) -> int:\n return self._strategy_id", "def getValidations(self):\n return self.objectValues('InstrumentValidation')", "def init_validation(self):\n log.warning(\"Running a ModelInterface validation initialization that was not overriden: this is a no-op.\")\n data = {}\n return data", "def data_validade(self):\n return self._data_validade", "def get_lr(self):\n\n if self.opt is None:\n raise ValueError('No learning rate schedulers initialized')\n else:\n for pg in self.opt.param_groups:\n return pg['lr']", "def getDecision(self):\n return self.decision", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def rule(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"rule\")", "def grouping(self) -> pulumi.Input[Union[str, 'ValidationThresholdGrouping']]:\n return pulumi.get(self, \"grouping\")", "def spec(self) -> Optional['outputs.ValidatingAdmissionPolicyBindingSpec']:\n return pulumi.get(self, \"spec\")", "def authorization_strategy(self) -> pulumi.Input['FhirDatastoreIdentityProviderConfigurationAuthorizationStrategy']:\n return pulumi.get(self, \"authorization_strategy\")", "def get_extra_validators(self):\n return self.extra_validators", "def get_rotation_poll_interval(self) -> Union[str, None]:\n return self._get_rotation_poll_interval(enable_validation=True)", "def getValidityFlag(self):\n return _libsbml.SBMLLevelVersionConverter_getValidityFlag(self)", "def is_valid(self):\n 
return self._is_valid", "def get_step_conf(self):\n return self.step_conf", "def validate(self, obj, name, value):\n if self.validation_trait:\n return self.validation_trait.validate(obj, name, value)\n return value", "def config_rule_state(self) -> str:\n return pulumi.get(self, \"config_rule_state\")", "def getFlow(self):\n return self._flow", "def test_kyc_get_validation(self):\n pass", "def getApplicableValidators(self):\n return _libsbml.SBMLDocument_getApplicableValidators(self)", "def dqm_validation(self, ds_name):\n metadata = self.metadata(ds_name)\n if (not metadata):\n return {}\n return metadata[\"_dqmValidation\"]", "def validation_step(self, batch, batch_idx):\n return self._generative_step(batch)", "def validate(self):\n return self.validator.validate(self.fields)" ]
[ "0.6838031", "0.6627097", "0.6515644", "0.6456136", "0.6417892", "0.63516897", "0.634212", "0.63212353", "0.6285395", "0.6255339", "0.6221338", "0.62149113", "0.6194205", "0.61286926", "0.60330224", "0.60326296", "0.60178643", "0.5966911", "0.5960197", "0.59587127", "0.58250636", "0.5818182", "0.58013624", "0.57982504", "0.5790065", "0.5705156", "0.567935", "0.56018883", "0.5594347", "0.55285174", "0.55285174", "0.5483286", "0.54813856", "0.54742616", "0.54619557", "0.54398096", "0.5394162", "0.535147", "0.5307053", "0.52716357", "0.5180698", "0.5178305", "0.5131019", "0.51292855", "0.5127458", "0.5108888", "0.5097912", "0.5077093", "0.5073638", "0.5057112", "0.5037827", "0.5037827", "0.5037827", "0.5037827", "0.5037827", "0.5037827", "0.5037827", "0.5032652", "0.5021821", "0.5004298", "0.5003569", "0.4997259", "0.49937847", "0.49937847", "0.49937847", "0.49651837", "0.4951632", "0.49509886", "0.49373624", "0.4934767", "0.49212056", "0.49168873", "0.49155742", "0.49137792", "0.4891979", "0.48904985", "0.4883086", "0.48821685", "0.48636848", "0.48561087", "0.4850636", "0.48485562", "0.48485562", "0.48485562", "0.48348874", "0.4826436", "0.48232397", "0.48116693", "0.48075435", "0.4802078", "0.47909284", "0.47864547", "0.47758663", "0.47709644", "0.47673738", "0.47670433", "0.4754789", "0.47500545", "0.47466344", "0.47455552" ]
0.67801
1
Gets the current verbose
def _get_verbose(self): self._validate_verbose() return deepcopy(self.verbose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verbose(self):\n return self.conf.get(\"verbose\")", "def verbose():\n return _verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose():\n return Verbose.level()", "def getVerbose(self):\n return self.__VERBOSE", "def verbose( self ):\n return Verbose.__level", "def verbose(self):\n\n return self._verbose", "def _verbose(self):\n return self._toBool(os.environ.get('VERBOSE', 0))", "def isVerbose(self):\n return self.opts.verbose", "def is_verbose():\n return g_verbose", "def verbose(self):\n enabled = self.lib.iperf_get_verbose(self._test)\n\n if enabled:\n self._verbose = True\n else:\n self._verbose = False\n\n return self._verbose", "def verbosity(self):\n return self._get('verbosity')", "def _get_optuna_verbose(self):\n self._validate_optuna_verbose()\n # use only for mode Optuna\n if self._get_mode() != \"Optuna\":\n return True\n return deepcopy(self.optuna_verbose)", "def is_verbose() -> bool:\n return VERBOSE", "def verbose(self):\n verbose = self.__class__.__name__ + \", alpha: \" + str(self.alpha)\n return verbose", "def verbose():\n GLOBAL['VERBOSE'] = True", "def verbosity(self):\n return self._verbosity", "def verbose_str(self):\n return self.summary.verbose(self.results) or ''", "def verbose(value=None):\n global verbosity\n\n if value != None:\n verbosity = value\n \n try:\n rval = verbosity\n except NameError:\n verbosity = False\n rval = verbosity\n\n return rval", "def verbosePref(self):\n # If the level of the object is below the Preference level,\n # recursively calls base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(VERBOSE_PREF, self._verbose_pref.level)[0]", "def __read_verbose_param(self, context):\n self.__verbose = False\n if context.args and context.args[0] in \"verboseVERBOSE\":\n self.__verbose = True", "def verbose(obj, msg):\n return obj.verbose(msg)", "def logging_verbosity(self):\n\n return self.get_raw(\"logging_verbosity\")", "def verbose(self, value):\n if value > self.DEBUG:\n value = self.DEBUG\n if value < self.NONE:\n value = self.NONE\n self._verbose = value", "def verbose ( self , message , *args , **kwargs ) :\n return self.logger.verbose ( message , *args , **kwargs )", "def _ansible_verbose(verbose_level=1):\n flag = ''\n if verbose_level > 1:\n flag = f'-{\"v\" * (verbose_level - 1)}'\n return flag", "def verbose(ctx, msg, *args):\n if ctx.verbose:\n info(msg, *args)", "def verbose(self, enabled=True):\r\n self.verbose = verbose", "def verbose(self, verbose):\n self._verbose = verbose", "def _set_verbose(value):\n global VERBOSE\n VERBOSE = value", "def setVerbose(newVal):\n global verbose\n verbose = newVal", "def verbosity_for_session(request):\n return request.config.getoption(\"--verbosity-project\")", "def known_verbose_name():\n return 'test Verbose name'", "def enable_verbose(self):\n self.verbose = True", "def set_verbose(self, v):\n self._verbose = bool(v)", "def GetVerbosityLevel(self):\n if self.verbose and self.simulation_verbose:\n\n return 2\n\n elif self.verbose and not self.simulation_verbose:\n\n return 1\n\n elif not self.verbose and self.simulation_verbose:\n\n return 1\n\n else:\n\n return 0", "def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v", "def verbose(self, state):\n self._verbose = state", "def SetVerbose(new_verbose=True):\n global _verbose\n _verbose = new_verbose", "def 
isVerboseDebug(self):\n self.edLogging.isVerboseDebug()", "def setVerbose(self, v):\n return self._set(verbose=v)", "def setVerbose(self, value):\n return self._set(verbose=value)", "def setVerbose(self, value):\n return self._set(verbose=value)", "def Verbose(on_off=\"on\"):\n\n global verbose\n \n if on_off.isdigit():\n int_value = int(on_off)\n else:\n int_value = 1\n\n if on_off.lower() == \"off\":\n int_value = 0\n print \"Verbose disabled.\"\n elif on_off.lower() == \"on\":\n int_value = 1\n print \"Verbose enabled.\"\n \n if -1 < int_value < 3:\n verbose=int_value\n interface.VERBOSE=int_value\n else:\n raise TypeError", "def get_verbose_name(self):\n return self.verbose_name or self.__class__.__name__", "def vprint(string):\n global verbose\n if verbose:\n print(string)", "def _do_set_verbose(self, args):\r\n verbose = int(args[1])\r\n self.server.set_verbose(verbose)\r\n return \"%d\" % verbose", "def do_verbose(self, arg):\n global verbose\n if verbose == 1:\n verbose = 0\n # prtin and add to log file \n logmsg = \" INFO: verbose mode disable\"\n log(logmsg)\n else:\n verbose = 1\n # prtin and add to log file \n logmsg = \" INFO: verbose mode enable\"\n log(logmsg)", "def is_verbose_log_enabled(self):\n\t\treturn bool(call_sdk_function('PrlDispCfg_IsVerboseLogEnabled', self.handle))", "def toggleVerbose(self):\n self.__VERBOSE = not self.__VERBOSE", "def print_verbose(args, msg):\n if args.verbose:\n print(msg)", "def _verbose(self,text):\n if self.verbose:\n print(text)", "def print_if_verbose(self, log):\n\n if self.verbose:\n print(log)\n return log", "def print_verbose(message:str):\n if params['verbose']:\n print(message)\n return", "def set_verbose(self, verbose):\n self._verbose = verbose", "def gnupg_verbose():\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n return [\"--verbose\"]\n\n return [\"-q\"]", "def get_verbosity_level():\n try:\n level = rcp.get(\"verbosity\",\"level\").upper()\n return convert_logging_level(level)\n except:\n logging.warning(\"[verbosity] section of the config malformed.\")\n return False", "def get_level(self):\n return self.debug_level, self.verbosity", "def verbose_log_link(self) -> str:\n return pulumi.get(self, \"verbose_log_link\")", "def get_display_options(verbose=False):\n if verbose:\n pprint(display_options)\n return display_options", "def convert_verbose_to_new(verbose):\n # In case the new verbosity is used, convert to the old one.\n if verbose is None: verbose=0\n if not isinstance(verbose, str) and verbose<10:\n status_map = {\n 'None': 'silent',\n 0: 'silent',\n 6: 'silent',\n 1: 'critical',\n 2: 'warning',\n 3: 'info',\n 4: 'debug',\n 5: 'debug'}\n if verbose>=2: print('[colourmap] WARNING use the new verbose status. 
This will be deprecated in future versions.')\n return status_map.get(verbose, 0)\n else:\n return verbose", "def setVerbose(self, verbose):\n self._verbose = verbose", "def v_action(option,opt_str,value,parser):\n cmdline_main.message(\"Enabling verbose message output.\")\n if hasattr(parameterized,'get_logger'):\n parameterized.get_logger().setLevel(parameterized.VERBOSE)\n else: # For versions of the param package before 9 May 2013\n parameterized.min_print_level=parameterized.VERBOSE", "def verbose(module, message):\n if loggingLevel >= loggingLevelVerbose:\n ModuLog.log(\"V\", module, message)", "def set_verbose(self, verbose):\n self._shared.set_verbose(verbose)", "def parse_verbose_option(ctx: click.Context, param: click.Parameter, value: Optional[bool]) -> None:\n if value:\n logger = container.logger()\n logger.debug_logging_enabled = True", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def setVerbose(*args,**kwargs):\n verbose = args[0] if args else True\n if verbose:\n verbositySampleTools = 2\n verbosityPlotTools = 2\n verbosityVariableTools = 2\n verbositySelectionTools = 2\n verbosityWJ = 2", "def setVerbose(self, verboseValue):\n return self._set(verbose=verboseValue)", "def init_verbose_print(verbose=True, vfunc=print, nvfunc=None):\n global verboseprint\n if verbose:\n verboseprint = vfunc\n else:\n if not nvfunc:\n verboseprint = lambda *a, **k: None\n else:\n verboseprint = nvfunc\n return verboseprint", "def verbose(self, *args):\n\n if self.is_on(_Log.VERBOSE):\n self._write(self._out, *args)", "def _get_vc_mode(self):\n return self.__vc_mode", "def unittest_verbosity():\n frame = inspect.currentframe()\n while frame:\n self = frame.f_locals.get(\"self\")\n if isinstance(self, unittest.TestProgram):\n return self.verbosity\n frame = frame.f_back\n return 0", "def set_verbose(verbosity: bool) -> None:\n global VERBOSE # pylint: disable=global-statement\n VERBOSE = verbosity", "def tunnel2_log_options(self) -> pulumi.Output['outputs.VpnConnectionTunnel2LogOptions']:\n return pulumi.get(self, \"tunnel2_log_options\")", "def _check_verbose(verbose: Any) -> int:\n logging_types = dict(\n DEBUG=logging.DEBUG,\n INFO=logging.INFO,\n WARNING=logging.WARNING,\n ERROR=logging.ERROR,\n CRITICAL=logging.CRITICAL,\n )\n\n _check_type(verbose, (bool, str, \"int\", None), item_name=\"verbose\")\n\n if verbose is None:\n verbose = logging.WARNING\n elif isinstance(verbose, str):\n verbose = verbose.upper()\n _check_value(verbose, logging_types, item_name=\"verbose\")\n verbose = logging_types[verbose]\n elif isinstance(verbose, bool):\n if verbose:\n verbose = logging.INFO\n else:\n verbose = logging.WARNING\n elif isinstance(verbose, int):\n verbose = _ensure_int(verbose)\n if verbose <= 0:\n raise ValueError(\n \"Argument 'verbose' can not be a negative integer, \"\n f\"{verbose} is invalid.\"\n )\n\n return verbose", "def test_click_verbose_option(self) -> None:\n @click.command('test_func')\n @click_verbose_option\n def test_func(verbose: bool) -> None:\n \"\"\"Function for testing options.\"\"\"\n click.echo(verbose)\n\n default_result: Result = self.runner.invoke(test_func, [])\n flag_result: Result = self.runner.invoke(test_func,\n args=['--verbose'])\n envvar_result: Result = self.runner.invoke(test_func, env={'F_VERBOSE': '1'})\n\n self.assertEqual(default_result.output, 'False\\n',\n 'output should be `False` when flag is 
not '\n 'present')\n self.assertEqual(flag_result.output, 'True\\n',\n 'output should be `True` when flag is '\n 'present')\n self.assertEqual(envvar_result.output, 'True\\n',\n 'output should be `True` when flag is not'\n 'present but env var is set')", "def cli(verbose):\n level = (logging.WARNING, logging.INFO, logging.DEBUG)[min(verbose, 2)]\n logging.basicConfig(level=level)", "def setVerboseOuput(cls, state: bool) -> None:\n\n cls.VERBOSE_OUTPUT = state", "def setVerboseLevel(self,verbose):\n\tself.verbose=verbose\n\tif self.verbose and self.dbType=='sqlite':\n\t print \"db isolation\",self.db.isolation_level", "def vprint(msg):\n if defaults.verbose:\n print(msg)", "def __init__(self, verbose: int = 0) -> None:\r\n self.verbose = verbose", "def setVerboseOn(self):\n self.edLogging.setVerboseOn()", "def if_verbose(message):\n if args.verbose:\n logging.info(message)\n global_timer()", "def debug(self):\n return self.settings['debug']", "def verbose(string, level, indent=None):\n if args.verbose:\n if args.verbose > level:\n if indent is None:\n if level <= LEVEL_4:\n indent = \" \" * level\n else:\n indent = \" \"\n print (indent + string)\n return", "def verbosePref(self, setting):\n self.set_preference(candidate_info=setting, pref_ivar_name=VERBOSE_PREF)", "def localStageOutOption(self):\n return self.localStageOut['option']", "def test_valid_verbose(verbose: Any) -> None:\n check_verbose(verbose)", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def VFlag(self):\n return self._get_attribute('vFlag')", "def get_verbosity(count, level):\n\tif count == 0 and not level:\n\t\treturn lnk.config.get('lnk', 'settings')['verbosity']\n\telif level:\n\t\treturn level\n\treturn count", "def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")", "def info(self):\n import tc\n ## enumerate all options\n opts = self.to_list()\n res = \"\"\n fmt = \"%20s = %5s ## %s\\n\"\n\n for k, v in opts:\n res += fmt % (k, str(self.__getattribute__(k)),\n str(v.doc()).split('\\n')[0])\n\n return res", "def getLivemode(self):\n return self.base.get(\"livemode\", [])", "def tunnel1_log_options(self) -> pulumi.Output['outputs.VpnConnectionTunnel1LogOptions']:\n return pulumi.get(self, \"tunnel1_log_options\")" ]
[ "0.8428105", "0.8374118", "0.83612645", "0.83612645", "0.83612645", "0.83612645", "0.83353597", "0.8318962", "0.82725775", "0.8234203", "0.772145", "0.7617219", "0.7324933", "0.7297833", "0.7150878", "0.7135847", "0.7123028", "0.71162844", "0.7113516", "0.7033205", "0.7024528", "0.69975334", "0.67171705", "0.65999764", "0.65442204", "0.6543045", "0.64774865", "0.64735156", "0.6430088", "0.6289534", "0.6263101", "0.62578386", "0.6236899", "0.62345773", "0.61960036", "0.61232173", "0.61157215", "0.6104469", "0.60919404", "0.6091841", "0.60681856", "0.60624546", "0.6058492", "0.60576475", "0.6029449", "0.6029449", "0.5997882", "0.59617704", "0.59474313", "0.5942256", "0.5939457", "0.587573", "0.5872968", "0.58407366", "0.5827265", "0.5820204", "0.58070964", "0.5776632", "0.5753671", "0.57399637", "0.57140255", "0.57133234", "0.567371", "0.5668656", "0.5663954", "0.5655439", "0.5622031", "0.56180257", "0.55960256", "0.55905336", "0.55905336", "0.5572835", "0.5546948", "0.5539027", "0.5529071", "0.55270714", "0.55110073", "0.5496114", "0.5480603", "0.5451291", "0.5445833", "0.54252213", "0.5411772", "0.54090065", "0.5403833", "0.5388484", "0.5376907", "0.53591835", "0.5354864", "0.5351445", "0.53404814", "0.5337974", "0.53344387", "0.53301376", "0.5319991", "0.52867824", "0.52832437", "0.52823573", "0.5276623", "0.52648175" ]
0.8258136
9
Gets the current explain_level
def _get_explain_level(self): self._validate_explain_level() if self.explain_level == "auto": if self._get_mode() == "Explain": return 2 if self._get_mode() == "Perform": return 1 if self._get_mode() == "Compete": return 0 if self._get_mode() == "Optuna": return 0 else: return deepcopy(self.explain_level)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_explain(self):\n print(EXPLAIN)", "def level(self):\n return self.__level", "def level(self):\n return self.__level", "def getLevel(self):\n return _libsbml.ASTBasePlugin_getLevel(self)", "def get_level(self):\n return self.debug_level, self.verbosity", "def level(self) -> int:\n return self.__state.level()", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self) -> int:\n return self._level", "def level(self) -> int:\n return self._level", "def get_level(self) -> int:\n return self.rstate.level()", "def level(self) -> int:\n return self.categorization.level(self)", "def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)", "def getResultLevel(self):\n return _libsbml.DefaultTerm_getResultLevel(self)", "def explain(self):", "def explain(self, *, format=None, **options):\n return self.query.explain(using=self.db, format=format, **options)", "def getLevel(self):\n return self._level", "def Explain(self, request, global_params=None):\n config = self.GetMethodConfig('Explain')\n return self._RunMethod(\n config, request, global_params=global_params)", "def logging_verbosity(self):\n\n return self.get_raw(\"logging_verbosity\")", "def currentLevel( self ):\n assert isinstance( self._env, Env )\n assert isinstance( self._steps, list )\n\n return self._env.level( )", "def getLevel(self):\n return _libsbml.SBase_getLevel(self)", "def level(self):\n return self.init_v[2]", "def verbose( self ):\n return Verbose.__level", "def explain_query(self, query):\n return self.user_con.explain_query(query)", "def option(self):\r\n return conf.lib.clang_getDiagnosticOption(self, None)", "def verbosity(self):\n return self._verbosity", "def unittest_verbosity():\n frame = inspect.currentframe()\n while frame:\n self = frame.f_locals.get(\"self\")\n if isinstance(self, unittest.TestProgram):\n return self.verbosity\n frame = frame.f_back\n return 0", "def get_debug_level(self):\n return self.debug_level", "def getResultLevel(self):\n return _libsbml.FunctionTerm_getResultLevel(self)", "def verbosity(self):\n return self._get('verbosity')", "def get_level(self):\n try:\n return self.root_node()['document_level']\n except KeyError:\n return None", "def get_log_level(self) -> int:\n logging.debug(self.args)\n return getattr(logging, self.args.loglevel.upper(), None)", "def explain(self):\n data, end = \\\n self.pat.traverse(lambda obj, *args: obj.explain(*args),\n self.begin, self.data)\n return data", "def _mysql_get_effective_sql_mode(engine):\n # Get the real effective SQL mode. 
Even when unset by\n # our own config, the server may still be operating in a specific\n # SQL mode as set by the server configuration.\n # Also note that the checkout listener will be called on execute to\n # set the mode if it's registered.\n row = engine.execute(\"SHOW VARIABLES LIKE 'sql_mode'\").fetchone()\n if row is None:\n return\n return row[1]", "def explain(self, *extra_details: ExplainDetail) -> str:\n TEXT = get_gateway().jvm.org.apache.flink.table.api.ExplainFormat.TEXT\n j_extra_details = to_j_explain_detail_arr(extra_details)\n return self._j_statement_set.explain(TEXT, j_extra_details)", "def contentlevel(self):\n return self.get(\"contentLevel\")", "def python_logging_level(self) -> int:\n return LOG_LEVEL_LEVELNOS[self]", "def getLevel(self):\n return self.level", "def statement_detail(self):\n return self.statement_detail_xml()", "def get_level(self, level):\n return", "def _get_log_level_delta(self) -> int:\n return (self._args.quiet * 10) - (self._args.verbose * 10)", "def explain(self):\n \n self.logger.verbose = False \n dbpath, config = self._start() \n \n if config.explain not in [\"specific\", \"general\"]:\n return \"--explain must be 'general' or 'specific'\"\n config.obo = check_file(config.obo, dbpath, \"obo\")\n \n # allow user to pass several model/reference pairs\n models = config.model.split(\",\")\n references = config.reference.split(\",\") \n M = len(models)\n \n if len(references) != M:\n raise Exception(\"incompatible number of models and references\")\n \n # use the packet to load information from the db, refset and models\n packet = prep_compute_packets(self.config, \n references=references, \n models=models,\n partition_size=M)[0]\n packet.prep()\n refset = packet.general_refset \n if config.explain == \"specific\":\n refset = packet.specific_refset\n refset.learn_obo(MinimalObo(config.obo))\n\n allresults = [None]*M\n for i, (modelid, refid) in enumerate(zip(models, references)):\n data = packet.models[modelid]\n result = refset.inference_chain(data, refid, verbose=True,\n fp_penalty=config.fp_penalty)\n allresults[i] = result.to_json(nodata=config.explain_nodata) \n \n return \"[\"+(\",\".join(allresults))+\"]\";", "def getLevel( self ):\n level = self.getEffectiveLevel()\n if level == logging.CRITICAL:\n return 'critical'\n elif level == logging.ERROR:\n return 'error'\n elif level == logging.WARNING:\n return 'warning'\n elif level == logging.INFO:\n return 'info'\n elif level == logging.DEBUG:\n return 'debug'\n elif level == logging.NOTSET:\n return 'notset'\n else:\n return 'unknown ({})'.format( level )", "def indentation_level(self):\n return self._indentation_levels[-1]", "def get_logging_level(self):\n return self.logging_level", "def getVerbose(self):\n return self.__VERBOSE", "def getIsolationLevel(self):\n\treturn self.db.isolation_level", "def getTargetLevel(self):\n return _libsbml.SBMLLevelVersionConverter_getTargetLevel(self)", "def log_level(self) -> str:\n return self._log_level", "def getLevel(self, *args):\n return _libsbml.CompExtension_getLevel(self, *args)", "def level( self ):\n assert isinstance( self._level, int )\n assert isinstance( self._steps, list )\n assert isinstance( self._outter, Env ) or ( self._outter is None )\n\n return self._level", "def GetVerbosityLevel(self):\n if self.verbose and self.simulation_verbose:\n\n return 2\n\n elif self.verbose and not self.simulation_verbose:\n\n return 1\n\n elif not self.verbose and self.simulation_verbose:\n\n return 1\n\n else:\n\n return 0", "def explain_string(self, 
plan: pb2.Plan, explain_mode: str = \"extended\") -> str:\n logger.info(f\"Explain (mode={explain_mode}) for plan {self._proto_to_string(plan)}\")\n result = self._analyze(\n method=\"explain\", plan=plan, explain_mode=explain_mode\n ).explain_string\n assert result is not None\n return result", "def lineno():\n return str(' - Statement - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def reenlightenment(self):\n return self._reenlightenment", "def expertise_level_id(self) -> int:\n return self._expertise_level_id", "def getAdminLevel(self):\n return self.__adminLevel", "def min_level(self):\n return self.__min", "def getOutputLevel(self):\n return _libsbml.Output_getOutputLevel(self)", "def _inspect_mode(self):\n return self._project._inspect_mode", "def get_level(command):\n if is_delete(command):\n return None\n elif is_get(command):\n return int(command.split(\" \")[2])\n elif is_insert(command) or is_update(command):\n return int(command.split(\" \")[3])", "def level(self) -> Optional[LogLevel]:\n return None", "def log_level(self) -> str:\n return pulumi.get(self, \"log_level\")", "def get_output_level(self):\n outlvl = self._local_logger_level\n if outlvl is None:\n outlvl = self.config.output_level\n\n return outlvl", "def explain(thing: Union[str, HDPlisp], level: int = 1):\n print((\">\" * level) + str(thing))", "def previous_hvac_mode(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"previous_hvac_mode\"))\r\n return self._previous_hvac_mode", "def hvac_mode(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"hvac_mode\"))\r\n return self._hvac_mode", "def log_level(self) -> LogLevel:\n if self.is_quiet:\n return LogLevel.NONE\n return LogLevel(self.logger.getEffectiveLevel())", "def final_level(self):\n return self.get(self._names[\"final_level\"])", "def _get_lsp_config_isis_shortcut_level(self):\n return self.__lsp_config_isis_shortcut_level", "def t_level_qc(self, originator=False):\n index = self.var_index()\n return self.var_level_qc(index, originator=originator)", "def verbose():\n return Verbose.level()", "def indent_level(self):\n return len(self._tagstack) - 1", "def get_logging_level():\n return logging.getLevelName(logging.getLogger().getEffectiveLevel())", "def getLevel(self, *args):\n return _libsbml.LayoutExtension_getLevel(self, *args)", "def maximum_level(self, question_type):\n\t\treturn 2", "def getIndentationLevel(self, code_line):\n print(\"the code line : \", code_line)\n return len(code_line) - len(code_line.lstrip(\" \"))", "def verbose(self):\n return self.conf.get(\"verbose\")", "def help_analyze(self):\n print(ANALYZE)", "def trigger_mode(self):\n return self.trigger_mode_index", "def _explain_model(self):\n raise NotImplementedError", "def getLevel(self, *args):\n return _libsbml.QualExtension_getLevel(self, *args)", "def conductor(self):\n return self._S.level()", "def understandability(self):\n # self._understandability = - 0.33 * self.ANA + 0.33 * self.DAM - 0.33 * self.DCC + 0.34 * self.CAMC \\\n # - 0.33 * self.NOP - 0.33 * self.NOM - 0.33 * self.DSC\n self._understandability = - 0.33 * self.ANA + 0.66 * self.DAM - 0.33 * self.DCC + 0.66 * self.CAMC \\\n - 0.33 * self.NOP - 0.33 * self.NOM\n return round(self._understandability, 5)", "def get_graph_optimization(self):\n optimization_levels = {'DISABLE_ALL': ort.GraphOptimizationLevel.ORT_DISABLE_ALL,\n 'ENABLE_BASIC': ort.GraphOptimizationLevel.ORT_ENABLE_BASIC,\n 'ENABLE_EXTENDED': 
ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED,\n 'ENABLE_ALL': ort.GraphOptimizationLevel.ORT_ENABLE_ALL}\n level = self.cur_config['graph_optimization']['level']\n assert level in optimization_levels, \"the optimization choices \\\n are {}\".format(optimization_levels.keys())\n return optimization_levels[level]", "def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10", "def get_verbosity_level():\n try:\n level = rcp.get(\"verbosity\",\"level\").upper()\n return convert_logging_level(level)\n except:\n logging.warning(\"[verbosity] section of the config malformed.\")\n return False", "def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n return self._currentsDict[\"currentMode\"]", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def getLevel(self, *args):\n return _libsbml.FbcExtension_getLevel(self, *args)", "async def _get_migration_level(self, table: str) -> Optional[int]:\n level = await self.conn.fetchval('SELECT level FROM tinymud_migrations WHERE table_name = $1', table)\n assert level is None or isinstance(level, int)\n return level", "def level(self) -> pulumi.Input[Union[str, 'Level']]:\n return pulumi.get(self, \"level\")", "def __level(self, *args, **kwargs):\n pass", "def explain_http_uri(self) -> Optional[str]:\n return pulumi.get(self, \"explain_http_uri\")", "def help(self):\n return self._help" ]
[ "0.62678665", "0.59786433", "0.59786433", "0.5973489", "0.5863395", "0.58591205", "0.58441484", "0.58441484", "0.58441484", "0.58441484", "0.5830532", "0.5830532", "0.57422584", "0.55639464", "0.55551803", "0.55497247", "0.5544945", "0.55366", "0.5531055", "0.55061364", "0.54631025", "0.5451836", "0.5439167", "0.5430806", "0.540714", "0.53675157", "0.5367409", "0.5332075", "0.531812", "0.53156674", "0.53087735", "0.5276315", "0.52441883", "0.52430695", "0.5238409", "0.52290344", "0.52263176", "0.51918375", "0.519037", "0.51832926", "0.5178351", "0.5131888", "0.5123961", "0.50848", "0.5082648", "0.5065992", "0.50513613", "0.50377315", "0.5017878", "0.5015874", "0.50074345", "0.49829116", "0.49750727", "0.49605915", "0.49528486", "0.49390614", "0.4933974", "0.49200332", "0.49167952", "0.48942515", "0.48888266", "0.48878497", "0.48843175", "0.48799163", "0.48681864", "0.48637682", "0.48635748", "0.48568353", "0.48542604", "0.48517013", "0.48369592", "0.48339227", "0.48162457", "0.48152918", "0.48085487", "0.4806597", "0.4803918", "0.48026693", "0.4799597", "0.47952667", "0.47579515", "0.47521958", "0.47337267", "0.47321233", "0.47310957", "0.47296596", "0.471578", "0.47083005", "0.4707954", "0.4707675", "0.46991974", "0.46991974", "0.46991974", "0.46991974", "0.46933672", "0.4692404", "0.46915752", "0.4682802", "0.46811068", "0.4676393" ]
0.86530817
0
Gets the current features_selection
def _get_features_selection(self): self._validate_features_selection() if self.features_selection == "auto": if self._get_mode() == "Explain": return False if self._get_mode() == "Perform": return True if self._get_mode() == "Compete": return True if self._get_mode() == "Optuna": return False else: return deepcopy(self.features_selection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSelection(self):\n return self.selection", "def GetSelection(self):\r\n\r\n return self.selection", "def GetSelection(self):\r\n\r\n return self._current", "def GetSelection(self):\n \n return self.selected", "def get_selected_nodes(self):\n return self._selection", "def active_selection():\r\n\r\n om.MGlobal.getActiveSelectionList()", "def currentSelection():\n sel = bpy.context.selected_objects\n if sel:\n return sel\n else:\n col = activeCollection()\n if col is not None:\n # Filter only mesh objects.\n return collectionMeshes(col)", "def get_selection(self, name):\n print 'hi being selected in plotdata'\n return self.selections.get(name, None)", "def _getAsSelection(self):\n return self._asSelection", "def get_selected(self):\n return self.selected", "def feature(self):\n return self._feature", "def feature(self):\n return self._feature", "def current_choice(self):\n\t\treturn self.choice_data_list[self.select_index]", "def get_active(cls) -> FeatureSet:\n if cls._active_feature_set is None:\n raise RuntimeError('No FeatureSet being actively defined.')\n return cls._active_feature_set", "def get_features(self):\n return self._features", "def selection(self) -> str:\n return self._selection", "def GetSelection(self):\n return self.__selected_item", "def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features", "def selected(self):\n return self._selected", "def GetSelection(self):\r\n \r\n return self._curpage", "def benchmark_selection(self):\n return self._benchmark_selection", "def get_selection(self, selection_name, format=None):", "def _feature_selection(self , x ,y):\n # initialize good features list\n # and best scores to keep track of both\n good_features = []\n best_scores = []\n\n # calculating the number of features\n num_features = x.shape[1]\n\n # infinite loop\n while True:\n # intialize best feature and score of this loop\n this_feature = None\n best_score = 0\n\n # loop over all features\n for feature in range(num_features):\n # if feature is already in good features,\n # skip this for loop\n if feature in good_features:\n\n continue\n # selected features are all good till now\n # and current feature\n selected_features = good_features + [feature]\n # remove all other feature from the data\n xtrain = x[: , selected_features]\n # calculate the score , in our case AUC\n score = self.evaluate_score(xtrain , y)\n # if score is greater then the best score\n # of this loop, change best score and best feature\n if score > best_score:\n this_feature = feature\n best_score = score\n\n # if we have selected a feature , add it to\n # the good feature list and update best score list\n if this_feature != None:\n good_features.append(this_feature)\n best_scores.append(best_score)\n\n # if we did not improve during the last two rounds,\n # exit the while loop\n if len(best_score) > 2:\n if best_scores[-1] < best_scores[-2]:\n break\n\n # return the best score and good features\n # why do we remove the last data point?\n return best_scores[:-1] , good_features[:-1]", "def get_selected_features(dataset_features, model):\r\n model = SelectFromModel(model, prefit=True)\r\n feature_bool_mask = model.get_support()\r\n selected_features = dataset_features.columns[feature_bool_mask]\r\n transformed_dataset = pd.DataFrame(model.transform(dataset_features), columns=dataset_features.columns[feature_bool_mask], index=dataset_features.index)\r\n return selected_features, transformed_dataset", "def features(self):\n return self._features", "def 
__window_getSelection(self):\n return None", "def selected(self):\n return self._choices[self._selected][0]", "def select_features(\n self, x_train, y_train, estimator, selection_type, selection_params\n ):\n x_train = _check_X(x_train)\n if selection_type == \"regularization\":\n fe_sel_ = SelectFromModel(estimator)\n fe_sel_.fit(x_train, y_train)\n selected_feat = x_train.columns[(fe_sel_.get_support())]\n # get_support returns list of Bool values where a column is important or not\n return fe_sel_, selected_feat\n else:\n try:\n from mlxtend.feature_selection import (\n SequentialFeatureSelector as sfs,\n ) # noqa\n except ImportError as e:\n raise ImportError(\n \"{} using recursion requires {} from {}. \"\n \"You can install with `pip install {}`\".format(\n \"select_features\",\n \"SequentialFeatureSelector\",\n \"mlxtend\",\n \"mlxtend\",\n )\n ) from e\n fe_sel_ = sfs(estimator, **selection_params)\n fe_sel_.fit(x_train, y_train)\n return fe_sel_, fe_sel_.k_feature_names_", "def GetOldSelection(self):\r\n\r\n return self.old_selection", "def get_selected(self):\n return [sel.internalPointer().obj for sel in self.view.selectedIndexes()]", "def find_selected(self):\r\n return None", "def get_selection(self):\n if not len(self.GetSelectionBlockTopLeft()):\n selected_columns = self.GetSelectedCols()\n selected_rows = self.GetSelectedRows()\n if selected_columns:\n start_col = selected_columns[0]\n end_col = selected_columns[-1]\n start_row = 0\n end_row = self.GetNumberRows() - 1\n elif selected_rows:\n start_row = selected_rows[0]\n end_row = selected_rows[-1]\n start_col = 0\n end_col = self.GetNumberCols() - 1\n else:\n start_row = end_row = self.GetGridCursorRow()\n start_col = end_col = self.GetGridCursorCol()\n elif len(self.GetSelectionBlockTopLeft()) > 1:\n wx.MessageBox(\"Multiple selections are not supported\", \"Warning\")\n return []\n else:\n start_row, start_col = self.GetSelectionBlockTopLeft()[0]\n end_row, end_col = self.GetSelectionBlockBottomRight()[0]\n return [start_row, start_col, end_row, end_col]", "def selected_point_solution(self):\n return self._selected_point_solution", "def features_inputter(self):\n return getattr(\n self.examples_inputter, \"features_inputter\", self.examples_inputter\n )", "def get_selection():\n\n selected = Gui.Selection.getSelectionEx()[0].SubObjects\n sel_len = len(selected)\n result = SelectionContainer()\n\n for _x in range(0, sel_len):\n\n shape_type = selected[_x].ShapeType\n\n if shape_type == 'Vertex':\n result.vertices.append(selected[_x])\n\n elif shape_type == 'Edge':\n\n if 'Line' in str(selected[_x].Curve):\n result.lines.append(selected[_x])\n else:\n result.curves.append(selected[_x])", "def do_feat_sel(self, train_X, train_y):\n # feature selection based on training set only\n train_X_fs = pd.DataFrame(self.skb.fit_transform(train_X, train_y), index=train_X.index)\n scores = pd.DataFrame({'score': self.skb.scores_, 'pval': self.skb.pvalues_}).T\n scores.columns = train_X.columns\n scores.to_csv(self.save_path + '/feature_scores.csv')\n return train_X_fs", "def selected_index(self):\r\n return self._index_of_sel_point", "def getSelectedItem(self):\n return self.selected", "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, 
train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def get_other_features(self):\n return self.other_features", "def get_gold_selection(self, pointer):\n raise NotImplementedError", "def features(self):\n\n return self._features", "def _get_data_selection(self, event):\n data = None\n # selection = event.GetSelection()\n id, _, _ = self.FindFocus().GetSelection().GetData()\n data_list, theory_list = \\\n self.parent.get_data_manager().get_by_id(id_list=[id])\n if data_list:\n data = data_list.values()[0]\n if data is None:\n data = theory_list.values()[0][0]\n return data", "def get_selected_text(self):\r\n return self.selectedText()", "def selected(self):\n return self.__result", "def getCurrentSelection():\n node = cmds.ls(sl=True)\n if node:\n node = node[0]\n if cmds.nodeType(node) == 'alembicHolder':\n shape = node\n return shape\n else:\n relatives = cmds.listRelatives(node, shapes=True, f=1)\n if relatives:\n for i in relatives:\n if cmds.nodeType(i) 
== \"alembicHolder\":\n shape = i\n return shape\n return None", "def get_selection(self):\n return [\n self.id_value_map[item]\n for item in self.selection()\n if item in self.id_value_map\n ]", "def select_feature(self, masked_data, masked_binary_data, can_query):\n # Use the loaded model to perform feature selection.\n if 'rand' in self.method:\n selections = self.model.select_feature_rand(can_query)\n elif 'us' in self.method:\n selections = self.model.select_feature_us(can_query, self.question_encodings)\n elif 'rl' in self.method:\n _, _, selections = self.model.select_feature_rl(can_query, self.question_encodings, self.rl_net)\n self.previous_selections = selections\n\n return selections", "def _get_features(self, session):\n feature_utils.qsr_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n feature_utils.standardize_simple(session, self.config)\n\n # feature_utils.marker_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n\n return session[SESSION_FEAT]", "def selected_components(self):\n return self._selected_components", "def rightgetselected(self):\n return self._rightlist.getselect()", "def get_selected_features(pipeline, verbose = False):\n\n \n assert isinstance(pipeline, Pipeline), \"Input isn't a Pipeline\"\n assert isinstance(pipeline[0], ColumnTransformer), \"First step isn't a ColumnTransformer\"\n\n features = get_feature_names(pipeline[0], verbose=verbose)\n\n for i, step in enumerate(pipeline.steps[1:]):\n if verbose: print(i, \": \", step[0])\n \n if hasattr(step[1], 'get_support'):\n \n check_is_fitted(step[1])\n\n retained_cols = step[1].get_support()\n if verbose: print(sum(retained_cols), \"of\", len(retained_cols), \"retained, \",\\\n round(sum(retained_cols) / len(retained_cols) * 100, 1), \"%\")\n\n features = [feature for is_retained, feature in zip(retained_cols, features) if is_retained] \n\n return features", "def leftgetselected(self):\n return self._leftlist.getselect()", "def featureselection(self, df):\n try:\n # converting blank value to NaN value.\n df = df.replace(' ', np.nan)\n df[\"Long_emp_length\"] = \"\" # adding additional feature col.\n\n # loading list of features\n features = pd.read_csv(self.feature_selected)\n self.features = [x for x in features[\"0\"]]\n df = df[self.features]\n return df\n except Exception as e:\n self._Logger.error(\"Error in Feature Selection: {}\".format(e))", "def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row))\n return '\\n'.join(sel)", "def get_active(self):\n selections = self.view.selectedIndexes()\n if len(selections) == 0:\n return None\n\n return selections[0].internalPointer().obj", "def get_selected_ix( self ):\n selected_ix = self.listbox.curselection()\n\n if selected_ix == tuple( ):\n selected_ix = -1\n else:\n selected_ix = selected_ix[0] # since we allow only 1 selection\n\n return selected_ix", "def subset_(self):\n return self.X.ix[:, self.important_features_]", "def selected(self):\n\t\treturn [self.regions[int(i)]\n\t\t\tfor i in self.regionListing.hlist.info_selection()]", "def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row[self.table.ordinal:]))\n return '\\n'.join(sel)", "def get_featureRange(self):\n\n return 
self.featureRange", "def 取选中项索引(self): # real signature unknown; restored from __doc__\n return self.GetSelection()", "def curr_selection(self):\n\n self.domain = self.row[0]\n abstract = self.row[5]\n self.data_type = self.row[1]\n self.object_id = self.row[3]\n self.service = self.row[2]\n self.layer_title = self.row[4]\n crs_options = self.row[6]\n self.dlg.uCRSCombo.clear()\n if self.data_type != \"table\":\n self.dlg.uCRSCombo.addItems(crs_options)\n curr_crs = self.map_crs()\n if curr_crs in crs_options:\n idx = self.dlg.uCRSCombo.findText(curr_crs)\n self.dlg.uCRSCombo.setCurrentIndex(idx)\n self.dlg.uTextDescription.setText(abstract)", "def select(self):\n return self.recipeset", "def get_features(self):\n return []", "def GetSelected(self):\r\n gList = self.gList\r\n return [self.GetItem(x) for x in range(gList.GetItemCount())\r\n if gList.GetItemState(x,wx.LIST_STATE_SELECTED)]", "def get_selected_index(self) -> int:\n return self._selected_index", "def getChild(self):\n return self.features[0]", "def extract_feature(self, extractor, data, channel_selection=True):\n if channel_selection:\n data = data[self.ch_ind]\n trial_feat = extractor(data)\n return trial_feat", "def getFeatureGenerator(self):\n\t\treturn self.feature_generator", "def update_selection(self):\n raise NotImplementedError", "def feature_set(self) -> Optional[pulumi.Input['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")", "def is_selected(self):\n return self._selected", "def is_selected(self):\n return self._selected", "def get_who_features(self):\n return self.who_made_features", "def get_selected(self):\n\n # if there are items in the list\n if self.currentIndex() != -1:\n modelitem = self._dictmodel.itemFromIndex(\n self._modelindexes[ self.currentIndex() ]\n )\n return modelitem", "def select_features(feature_set):\n df_train = feature_set[\"train\"]\n df_valid = feature_set[\"valid\"]\n df_test = feature_set[\"test\"]\n\n # drop cols with NaN values\n df_train = df_train.drop(df_train.columns[df_train.isna().any()].tolist(), axis=1)\n\n # define Features and Target Dataset\n X_train = df_train.copy()\n \n # TSFresh feature selection\n selected_features = feature_selection.select_features(X_train, Y_train)\n\n # reduce dfs to selected features\n df_train_sel = df_train[selected_features.columns]\n df_valid_sel = df_valid[selected_features.columns]\n df_test_sel = df_test[selected_features.columns]\n\n print(f\"Shape after Selection: {df_train_sel.shape}\")\n\n return {\n \"name\": feature_set[\"name\"],\n \"train\": df_train_sel,\n \"valid\": df_valid_sel,\n \"test\": df_test_sel,\n }", "def nb_feature_select(self,estimator, X, y,cv_kfold=5):\n\n try:\n selector = RFECV(estimator, step=1,cv=cv_kfold, min_features_to_select=round((len(X.columns)/2)))\n selector = selector.fit(X,y)\n support = selector.support_\n selected = []\n for a, s in zip(X.columns, support):\n if(s):\n selected.append(a)\n return selected\n except Exception as e:\n print(e)", "def selected_index(self):\n return self._selected_index", "def selected(self, multi=False):\n selected_inputs = [input_elem for input_elem in self.inputs if input_elem.selected]\n selected_input = selected_inputs if multi else selected_inputs[0]\n return selected_input", "def select_features(self, x, y, **kwargs):\n if self.__k is None:\n self.__k = x.shape[1]\n self._params[\"k\"] = ParameterDefinition(MinMax(1, self.__k), int)\n val = int(np.around(np.random.uniform(1, self.__k)))\n self.__select_k_best.set_params(k=val)\n\n 
self.__select_k_best.fit(x, y)\n return self.__select_k_best.get_support()", "def feature_set(self) -> pulumi.Output[Optional['OrganizationFeatureSet']]:\n return pulumi.get(self, \"feature_set\")", "def _selection ( self, nick ) :\n \n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n return self.__selections_[ self.name() ].get( nick , None )", "def current_val(self):\n try:\n return self.listbox.get(self.listbox.curselection()[0])\n except IndexError:\n raise KeyError(\"Nothing selected\")", "def is_selected(self):\n return self.container['is_selected']", "def get(self, name):\n try:\n return(self._d_features[name])\n except:\n log.error(\"Can't get feature '%s'\" % name)\n return", "def environment_selection_strategy(self):\n return self._environment_selection_strategy", "def selection(self) -> Chromosome:\n # each chromosome has a fitness, and the lower the fitness, the higher the probability of election\n choices_list = list(range(len(self._population)))\n weights = [1 / chromosome.get_fitness() for chromosome in self._population]\n\n index = choices(choices_list, weights=weights)[0]\n\n return self._population[index]", "def GetStringSelection(self):\n \n return self.choices[self.selected].GetLabel()", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def getSelectedNode(self):\n return self.__selectedNode", "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def get_style1_features(self):\n return self.style1_features", "def advanced_features(self):\n return self._advanced_features", "def getCurrentTransformSelection():\n node = cmds.ls(sl=True)\n if node:\n node = node[0]\n if cmds.nodeType(node) == 'transform':\n xform = node\n return xform\n else:\n relatives = cmds.listRelatives(node, shapes=True, f=1)\n if relatives:\n for i in relatives:\n if cmds.nodeType(i) == \"transform\":\n xform = i\n return xform\n return None", "def get_selected_gs(self):\n cur = self.gslist.currentItem()\n if cur is None:\n return None\n name = str(cur.text())\n if name[0] == '*':\n return name[1:]\n return name", "def get_selected_assets(self):\n raise NotImplementedError", "def findFeatures(self):\n\t\tpass", "def selection(self, name):\n try:\n return self._selections[name]\n except KeyError:\n raise Pype9NameError(\n \"No selection named '{}' (possible '{}')\"\n .format(name, \"', '\".join(self.selection_names)))", "def GetSelectedFont(self):\r\n\r\n return self._selected_font" ]
[ "0.72452754", "0.7188117", "0.71176845", "0.6940054", "0.67970186", "0.6783767", "0.67610663", "0.6721341", "0.66002655", "0.6589361", "0.6549051", "0.6549051", "0.6516982", "0.64433926", "0.6428101", "0.64220285", "0.6418494", "0.6352769", "0.6308269", "0.62836784", "0.6261021", "0.6135232", "0.61306363", "0.6128573", "0.6118927", "0.61087346", "0.6099219", "0.60949004", "0.6089752", "0.6058946", "0.6042898", "0.599442", "0.5972795", "0.59513587", "0.59494936", "0.5945717", "0.59423465", "0.59302396", "0.59289837", "0.5924378", "0.591668", "0.5912811", "0.590871", "0.5902269", "0.58728766", "0.58700407", "0.58663756", "0.5840977", "0.58336186", "0.5815314", "0.58017236", "0.5798873", "0.5775778", "0.5775604", "0.5756241", "0.57486105", "0.57480067", "0.57464415", "0.5739808", "0.572199", "0.57136166", "0.5709473", "0.5704222", "0.5694647", "0.5693987", "0.56758416", "0.56748265", "0.5668978", "0.566667", "0.56573516", "0.56558037", "0.56536394", "0.5642438", "0.5642438", "0.5638978", "0.5630738", "0.5618238", "0.56076044", "0.5598584", "0.55969775", "0.55633295", "0.5555932", "0.55528677", "0.55336446", "0.55306304", "0.5529695", "0.55220366", "0.5520087", "0.55156934", "0.55107063", "0.55083084", "0.5503058", "0.5501156", "0.5482714", "0.5482171", "0.54807013", "0.546502", "0.54582006", "0.54537034", "0.5450375" ]
0.7440437
0
Gets the current start_random_models
def _get_start_random_models(self): self._validate_start_random_models() if self.start_random_models == "auto": if self._get_mode() == "Explain": return 1 if self._get_mode() == "Perform": return 5 if self._get_mode() == "Compete": return 10 if self._get_mode() == "Optuna": return 1 # just 1, because it will be tuned by Optuna else: return deepcopy(self.start_random_models)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exploring_starts(self):\n def random_choice(l): return l[np.random.randint(len(l))]\n return map(random_choice, (self.env.states, self.env.moves))", "def get_random_start(self):\n arr = np.zeros(self.dimension)\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n arr[:n_fit_p] = self.get_random_fit_parameters\n arr[n_fit_p:n_fit_p+n_nui_p] = self.get_random_nuisance_parameters\n arr[n_fit_p+n_nui_p:] = self.get_random_wilson_coeffs_start\n return arr", "def getRandom(self):\n return random.choice(self.ls)", "def getStartState(self):\n return self._bot", "def getStartState(self):\n return self._bot", "def getStartState(self):\n \n pass", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def scenelist(self):\n return self.caller.player_ob.db.random_scenelist or []", "def getStartState(self):\n\t\tutil.raiseNotDefined()", "def getStartState(self):\n\t\tutil.raiseNotDefined()", "def readFirst(self):\n return self.models[0].time_next", "def get_seed(self):\n return self.solver.get_model_trues(start=0, end=self.n)\n\n # slower:\n # model = self.solver.get_model()\n # return [i for i in range(self.n) if model[i]]\n\n # slowest:\n # seed = []\n # for i in range(self.n):\n # if self.solver.model_value(i+1):\n # seed.add(i)\n # return seed", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def getStartState(self):\r\n\t\tutil.raiseNotDefined()", "def init_rnd(self):\n\n # query max number of threads\n gennum = apache.AP_MPMQ_MAX_SPARE_THREADS\n # make generators\n # this bit is from Python lib reference\n g = random.Random(time.time())\n result = [g]\n for i in range(gennum - 1):\n laststate = g.getstate()\n g = random.Random()\n g.setstate(laststate)\n g.jumpahead(1000000)\n result.append(g)\n return result", "def generate_random_start_state(self) -> State:\n part_states = []\n random.shuffle(self.blocks)\n placed = []\n t = 0\n\n for block in self.blocks:\n if 1 / (t + 1) >= random.random():\n part_states.append(PartState(f'on({block.arguments[0]},table)'))\n else:\n rand = random.randint(0, len(placed) - 1)\n part_states.append(PartState(f'on({block.arguments[0]},{placed[rand]})'))\n\n placed.append(block.arguments[0])\n t += 1\n\n return State(set(part_states))", "def get_random_start_state(self) -> State:\n if len(self.blocks) <= state_enumeration_limit:\n rnd = random.randint(0, len(self.allStates) - 1)\n return self.allStates[rnd]\n else:\n return self.generate_random_start_state()", "def get_model_count(self):\n return len(self._model_start_i)", "def get_starting_node(self, graph):\n return random.choice(list(graph.nodes))", "def get_random(self):\n return self._get_random()", "def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models", "def get_auto_start_import(self):\n\t\treturn self.checkAutoStartImport.get_active()", "def initLocalBestChoice(self):\n random.seed()\n return", "def availablemodels(self):\n return self.__models.keys()", "def random_start_probs(self) -> np.ndarray:\n return self.random_state.dirichlet(np.ones(self.n_states), size=1).flatten()", "def create_models(self):\n model_list = 
[]\n for i in range(0, len(self.X.cluster.unique())):\n foo_model = self.model\n foo_model.set_params(**self.best_params_list[i])\n model_list.append(foo_model)\n return model_list", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def _ensure_gazebo_startup(self):\n model_base_name = \"__start_up_box__\"\n rospy.wait_for_message(self.param.topics.gazebo_models, ModelStates)\n i = 0\n while True:\n # Get all models currently listed in Gazebo's models\n # that contain the model_base_name in their name.\n boxes = list(b for b in self.model_names if model_base_name in b)\n\n if len(boxes) != 0:\n for b in boxes:\n self._remove_model(b)\n return\n i += 1\n self._spawn_model(f\"\"\"<model name=\"{model_base_name}{i}\"></model>\"\"\")\n rospy.sleep(0.1)\n rospy.wait_for_message(self.param.topics.gazebo_models, ModelStates)", "def model_start_date(self):\n return self._model_start_date", "def _get_start(self):\n return self._start", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\n util.raiseNotDefined()", "def getStartState(self):\r\n util.raiseNotDefined()", "def get_models(self):\n self.load()\n return self._models", "def sample_models(self):\n label_sets = []\n free_labels = xrange(self.label_count)\n \n while len(label_sets) < self.model_count:\n label_set = 
random.sample(free_labels, self.labelset_size)\n if label_set not in label_sets:\n label_sets.append(label_set)\n\n self.label_sets = label_sets", "def random(self):\n return self._random", "def list_model_runs(self):\n return sorted([x[\"name\"] for x in self._store.read_model_runs()])", "def starting_nodes(self):\n return self.starting_nodes_ #abstract requires this exists!", "def startRandom(session):\n rarity = Rarities.getRandomRarity()\n # Select a random quest from the database based on the random % rarity\n quest = None\n while True:\n # Rpeat this process until you get a not already active/valid/enabled quest\n quest = session.query(Quest).filter(Quest.rarity == rarity).order_by(func.random()).first()\n #TODO: isQuestActive()\n \n if quest.is_active == True and quest.isActive(session) == False:\n break\n\n activequest = QuestActive(time_stop = datetime.datetime.now() + datetime.timedelta(days = quest.interval_days),\n quest = quest\n )\n print(\"Started random quest %s\" % (quest.id))\n session.add(activequest)\n session.commit()\n return quest.id", "def init_model(self):\n pass", "def get_vehicle_start_index(self):\n return [vehicle.start_index for vehicle in self.vehicles]", "def get_model(self):\n # just return the first model, since all replicas are the same\n return self.call_async(0, '_async_get_model').gen()", "def whoGoesFirst(self):\n\t\treturn random.randint(1, 2)", "def starts(self):\n return self.time_start", "def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list", "def choose_random_site(self):\n return [self.rng.integers(low=0, high=self.num_spins - 1)]", "def get_Xc_starts(self):\r\n start_list = [np.random.randint(low=0, high=self.total_windows-self.buffer_needed) for _ in range(self.total_points)]\r\n return np.array(start_list)", "def start(self):\n return self.start_", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def get():\n\n return {'model_ids': mgmt.get_model_ids()}", "def random(self):\n\n return self._random", "def get_randoms_to_generate(self):\n return self._randoms_to_generate", "def run(seed, ModelClass=Model):\n model = ModelClass(random_seed=seed)\n return model.one_trial(1, 10)", "def get_start_state(self):\r\n util.raiseNotDefined()", "def _random_wait_start(self):\n\n # wait random amount of time to break synchron client behavior\n wait_until = time.time() + random.randint(0, self._config['random_wait_time'])\n while time.time() < wait_until and self._running:\n self._logger.debug(\"Waiting %.1fs to start first request\" % (wait_until - time.time()))\n time.sleep(1)", "def get_simpoint_start_insts(self) -> List[int]:\n return self._simpoint_start_insts", "def rand(self):\n return self.State.rand()", "def loadParts(self):\n for i in range(15):\n self.model_parts[i] = loadModel(\"ato_{}.pkl\".format(str(i)))" ]
[ "0.61098045", "0.5519804", "0.5495816", "0.5439313", "0.5439313", "0.54216105", "0.53990346", "0.53893465", "0.53497064", "0.53497064", "0.5334331", "0.53277075", "0.5320885", "0.5319373", "0.53144366", "0.5306886", "0.52948534", "0.5272874", "0.52695197", "0.5261325", "0.52470714", "0.5232264", "0.5231898", "0.5218057", "0.5217299", "0.52081203", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51753676", "0.51747364", "0.51502305", "0.51373976", "0.51302224", "0.51302224", "0.51302224", "0.51302224", "0.51302224", "0.51302224", "0.51302224", "0.51302224", "0.51302224", "0.5128879", "0.5116252", "0.51151675", "0.51151663", "0.5105412", "0.50930035", "0.50905335", "0.50885665", "0.5078994", "0.5078195", "0.50652343", "0.50623405", "0.50586665", "0.50474256", "0.5034483", "0.50339776", "0.5033278", "0.5029071", "0.50224674", "0.5013056", "0.5009587", "0.5008873", "0.5003241", "0.50031036", "0.50009394", "0.499109" ]
0.80309975
0
Gets the current hill_climbing_steps
def _get_hill_climbing_steps(self): self._validate_hill_climbing_steps() if self.hill_climbing_steps == "auto": if self._get_mode() == "Explain": return 0 if self._get_mode() == "Perform": return 2 if self._get_mode() == "Compete": return 2 if self._get_mode() == "Optuna": return 0 # all tuning is done in Optuna else: return deepcopy(self.hill_climbing_steps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_steps(self):\n return self.steps", "def get_steps(self):\n return self.steps", "def get_steps(self):\n return self.steps", "def getSteps( self ):\n\n return self.adb.get( 'steps' )", "def getSteps():", "def get_step(self):\n return self.step", "def get_step(self):\n return self.step", "def get_workflow_steps(self):\n return self._data_dict[self.KEY_WF_STEPS]", "def getCurrentStep():", "def step(self):\n return self._step", "def raw_steps(self):\n return self.obj_payload[\"steps\"]", "def expansion_steps(self):\n return self._p", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def get_step(self) -> int:\n return self.step", "def get_view_steps(self):\n return self._data_dict[self.KEY_VIEW_STEPS]", "def getWorkflowSteps(self):\n\n return self.dbase.getProcessSteps(self.scene)", "def getStep():\n # TODO: can there be non-Step logs?", "def get_steps_num():\n return 0", "def step ( self ) :\n return self.__step", "def instruction_steps(self) -> Sequence['outputs.CodelessUiConnectorConfigPropertiesResponseInstructionSteps']:\n return pulumi.get(self, \"instruction_steps\")", "def cur_step(self):\n return self._cur_step", "def get_steps(self):\n return len(self.trajectory)", "def get_time_step(self):\n return self._time_step", "def _step(self) -> int:\n return self._config[CONF_STEP]", "def current_step(self) -> FlowNode:\n return self._current_step", "def steps(self) -> pulumi.Output[Sequence['outputs.StepResponse']]:\n return pulumi.get(self, \"steps\")", "def timeStep(self):\n return self.params['h']", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def get_step_conf(self):\n return self.step_conf", "def get_step(self):\n # decide which direction and how far\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_progression(self):\n return self._harmonies", "def step_id(self):\n return self._step_id", "def get_all_steps(self):\n steps = []\n steps.extend(self.init_workspace_steps())\n steps.extend(self.repos_clone_steps())\n steps.extend(self.cli_steps())\n steps.extend(self.prepare_mobilespec_steps())\n steps.extend(self.deploy_steps())\n return steps", "def availableSteps( self ):\n assert isinstance( self._level, int )\n assert isinstance( self._steps, list )\n assert isinstance( self._outter, Env ) or ( self._outter is None )\n\n lst = [ ]\n self._availableSteps( lst )\n return lst", "def get_last_step(self):\n return self.get_step_by_index(-1)", "def getStep(self, *args):\n return _CompuCell.Simulator_getStep(self, *args)", "def get_steps(self) -> list:\n ret_val = []\n for step_id in self:\n step_body = Steps.cache_step(step_id)\n if step_body is not None:\n ret_val.append(step_body)\n\n return ret_val", "def time_step(self):\n return self._time_step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def active_step(self):\n if self._step_stack:\n return self._step_stack[-1]\n return None", "def steps(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StepArgs']]]]:\n return pulumi.get(self, \"steps\")", "def get_current_step(self):\n try:\n return self.get_step_by_id(self.current_step.id)\n except (AttributeError, ValueError):\n message = \"The 
current step for this ticket is not set.\"\n logger.debug(message)\n raise KeyError(message)", "def get_step(self):\n direction = choice([1,-1])\n direction = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def hill_climbing(problemHC):\n current = Node(problemHC.initial)\n while True:\n neighbors = current.expand(problemHC)\n if not neighbors:\n break\n #min instead of max\n neighbor = argmin_random_tie(neighbors,\n lambda node: problemHC.value(node.state))\n if problemHC.value(neighbor.state) >= problemHC.value(current.state):\n break\n current = neighbor\n return current.state", "def last_triggered_step(self):\n return self._last_triggered_step", "def steps_location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"steps_location\")", "def get_previous_step(self):\n return self.get_step_by_index(-2)", "def self_tracking(self):\n return self.tlwh()", "def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step", "def step(self) -> int:\n return self._step", "def get_step():\n\n # Decide which direction to go and how far to go in that direction.\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4, 5, 6, 7, 8])\n step = direction * distance\n\n # Reject moves that go nowhere.\n if step == 0:\n get_step()\n else:\n return step", "def get_first_step(self):\n return self.get_step_by_index(0)", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def currentLevel( self ):\n assert isinstance( self._env, Env )\n assert isinstance( self._steps, list )\n\n return self._env.level( )", "def step_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"step_id\")", "def GetAnimationStep(self):\r\n\r\n return self._animation_step", "def get_steps(steps):\n cexc.step_exceptions(steps)\n steps_int = int(steps)\n if steps_int > MAX_STEPS:\n steps_int = MAX_STEPS\n return steps_int", "def get_current_challenge_part(self):\n dist = max(self.get_global_total_distance_on_foot(), self.get_global_total_distance_on_bike())\n checkpoints = self._get_challenge_parts()\n\n result = 0\n for d in checkpoints.keys():\n if d <= dist:\n result = result + 1\n\n return '%02d' % result", "def get_break_points(self):\n\n return copy.deepcopy(self._break_points)", "def GetStepTaken(self):\n return _gmat_py.Propagator_GetStepTaken(self)", "def GetCGStepDir(self):\n if self.n_iter <= 1:\n self.hvec = self.mol.g_total\n gamma = 0.0\n else:\n v1 = self.traj.grad[-1] - self.traj.grad[-2]\n v1 = v1.reshape((1, const.NUMDIM * self.mol.n_atoms))\n v2 = self.traj.grad[-1].reshape((const.NUMDIM, self.mol.n_atoms, 1))\n gamma = numpy.linalg.norm(numpy.dot(v1, v2))\n gamma *= 1.0 / numpy.linalg.norm(self.traj.grad[-1])**2\n self.hvec = self.mol.g_total + gamma * self.hvec\n self.step_dir = self.hvec", "def get_walking_line(self):\n\t\treturn self._bottom_rect.move(0,1)", "def getProcessingHistoryList(context):\n projectDir = context.projectDir\n steps = []\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps']) + 1\n for i in xrange(1, idx):\n key = GenericMetadata.HISTORY_PROTO + str(i)\n steps.append(history[key])\n except KeyError:\n pass\n \n return steps", "def directions(self):\n return self.piece_behavior.directions", "def num_steps(self) -> int:\n return self._num_steps", "def hill_climbing(problem):\n\n current = 
State(problem.initial_state)\n print(current.get_value())\n while current.get_value() != 0:\n neighbour = current.generate_neighbour()\n print(neighbour.board)\n print(neighbour.get_value())\n if neighbour.get_value() >= current.get_value():\n return current.board\n current = neighbour", "def get_goal(self):\n return self.get_observation(self.env._get_goal())", "def get_timesteps(self):\n return len(self.measurement_history)", "def get_pump_latched(self):\n return self.__latched_states[2]", "def previous_step_result(self):\n return self._previous_step_result", "def g(self):\n return self.moves", "def _get_departure_shift(self) -> tb.Workshift:\n return self.timeboard(self.departure_time - dt.timedelta(microseconds=1))", "def get_steps(dbpath):\n odb = openOdb(path=dbpath)\n _steps = []\n for _name,_stp in odb.steps.items():\n _time = _stp.timePeriod\n _procedure = _stp.procedure\n _nframes = len(_stp.frames)\n _steps.append((_name,_time,_nframes,_procedure))\n odb.close()\n return _steps", "def get_time_step_to_enqueue(self):\n return self.time_step_to_enqueue", "def current_operation(self):\n return GH_STATE_TO_HA[self._boiler.mode]", "def total_steps(self):\n return self.turns + (self.rounds*self.game_length)", "def steps_done(self):\n with _MonitorEnv._lock:\n return self._steps_done", "def hill_climbing(problem):\n current = Node(problem.initial)\n while True:\n neighbors = current.expand(problem)\n if not neighbors:\n break\n neighbor = argmax_random_tie(neighbors,\n key=lambda node: problem.value(node.state))\n if problem.value(neighbor.state) <= problem.value(current.state):\n break\n current = neighbor\n return current.state", "def wavelenstep(self):\n return self._wavelenstep", "def get_pump_tripped(self):\n return self.__pump_trip", "def get_chairs(self):\n return self.chairs", "def getSteps(fields, row):\n if 'steps' in fields:\n return int(row[fields.index('steps')])\n return None", "def step_name(self):\n return self._step_name", "def GetTimestepValues(self):\n if self.__timesteps is None: self.__timesteps = self.__SetInputTimesteps()\n # self.__timesteps should already be of type list\n return self.__timesteps if self.__timesteps is not None else None", "def get_cucumber(self):\n if not self._pickle_supported:\n raise NoPickleError()\n return self._cucumber", "def step_key(self) -> str:\n return self._step_execution_context.step.key", "def get_move(self):\n if self._difficulty == 0:\n return self._get_easy_move()\n else:\n # Different stategies/difficulties can be attached here\n return", "def horde_step(self, observation):", "def get_current_turn(self):\n return self._turn", "def n_steps(self) -> int:\n return len(self) - 1 # subtract the base metric", "def hklin(self):\n return self._hklin", "def get_current_turn(self):\n return self.turns.latest('number')", "def get_last_cycle_start(self):\n return self.data_provider.get_last_cycle_start()", "def step_down(self):\n # Propogate the deliberation_goal_votes down through the gearboxes\n agent_surprise = 0.0\n cable_goals = np.zeros((self.bundles_per_gearbox, 1))\n \n for gearbox in reversed(self.gearboxes):\n cable_goals = gearbox.step_down(cable_goals)\n if np.nonzero(gearbox.surprise)[0].size > 0:\n agent_surprise = np.sum(gearbox.surprise)\n # Tabulate and record the surprise registered in each gearbox\n self.recent_surprise_history.pop(0)\n self.recent_surprise_history.append(agent_surprise)\n self.typical_surprise = np.median(np.array(\n self.recent_surprise_history))\n mod_surprise = agent_surprise - 
self.typical_surprise\n self.surprise_history.append(mod_surprise)\n # Report the action that resulted for the current time step.\n # Strip the actions off the cable_goals to make \n # the current set of actions.\n action = cable_goals[:self.num_actions,:] \n return action", "def total_steps(self) -> global___Expression:", "def getPath(self):\r\n\t\treturn self.pathToGoal" ]
[ "0.72193843", "0.71010685", "0.70357335", "0.66529846", "0.652234", "0.6352011", "0.6352011", "0.6341539", "0.6029986", "0.600624", "0.59786874", "0.594206", "0.59252524", "0.59252524", "0.59252524", "0.59252524", "0.58734", "0.58724695", "0.58696485", "0.5851444", "0.5811696", "0.5811048", "0.5738428", "0.57373434", "0.5737148", "0.56261575", "0.55965716", "0.557876", "0.55633664", "0.5551416", "0.5549579", "0.5544684", "0.55301696", "0.55115604", "0.5467625", "0.5460574", "0.5455422", "0.54470986", "0.5432097", "0.54306984", "0.54288775", "0.54062986", "0.54062986", "0.5404801", "0.5399315", "0.53972346", "0.53468823", "0.5334535", "0.53174555", "0.5305646", "0.5257493", "0.5257421", "0.52514035", "0.5216142", "0.52005607", "0.5185096", "0.5180775", "0.516357", "0.51625544", "0.5161111", "0.5142188", "0.51364666", "0.5129147", "0.5125679", "0.5112661", "0.51076996", "0.51066655", "0.51039994", "0.5102683", "0.5099199", "0.50843644", "0.5082965", "0.50810194", "0.5070409", "0.50667113", "0.5062285", "0.50621086", "0.5061511", "0.5061403", "0.5059774", "0.50525403", "0.50494057", "0.5043954", "0.5040122", "0.50387913", "0.503404", "0.5033782", "0.50307995", "0.50240016", "0.49889946", "0.498864", "0.49762952", "0.49715963", "0.49675757", "0.49652928", "0.49608254", "0.49545604", "0.49474773", "0.49330884", "0.49308845" ]
0.7683898
0
Gets the current top_models_to_improve
def _get_top_models_to_improve(self): self._validate_top_models_to_improve() if self.top_models_to_improve == "auto": if self._get_mode() == "Explain": return 0 if self._get_mode() == "Perform": return 2 if self._get_mode() == "Compete": return 3 if self._get_mode() == "Optuna": return 0 else: return deepcopy(self.top_models_to_improve)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_top_models(self, return_scores=True):\n self.greater_score_is_better = is_greater_better(self.scoring_function)\n model_names = list(set([key.split('(')[0] for key in\n self.evaluated_individuals_.keys()]))\n models = OrderedDict({model: [] for model in model_names})\n for k in self.evaluated_individuals_:\n models[k.split('(')[0]].append(self.evaluated_individuals_[k])\n for model_name in model_names:\n models[model_name]=sorted(models[model_name],\n key=lambda x: x['internal_cv_score'],\n reverse=self.greater_score_is_better)\n self.models = models\n top_models = {model: models[model][0] for model in models}\n self.top_models = OrderedDict(\n sorted(top_models.items(),\n key=lambda x:x[1]['internal_cv_score'],\n reverse=self.greater_score_is_better))\n scores = {model: self.top_models[model]['internal_cv_score']\\\n for model in self.top_models}\n self.top_models_scores = OrderedDict(sorted(\n scores.items(), key=lambda x: x[1],\n reverse=self.greater_score_is_better))\n if return_scores:\n return self.top_models_scores\n else:\n return self.top_models", "def get_top_model(self):\n model = ModelHelper.load_model(filename_weight=self.top_model_path + '.h5',\n filename_model=self.top_model_path + '.json')\n\n return model", "def top(self):\r\n return self.topele", "def top(self):", "def get_best_model(self):\n return self.best_model", "def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)", "def top(self, **kwargs):\n return self.client.api.top(self.id, **kwargs)", "def get_top(self, model, limit=10, inverted=False):\n content_type= ContentType.objects.get_for_model(model)\n\n #Get a queryset of all the objects of the model. Get their scores\n results = self.filter(content_type=content_type).values('object_id').annotate(score=Sum('vote'))\n if inverted:\n results = results.order_by('score')\n else:\n results = results.order_by('-score')\n\n #We have a iterable list of objects of the requested model and their respective scores\n # Use in_bulk() to avoid O(limit) db hits.\n class_name = content_type.model_class()\n objects = class_name.objects.in_bulk([item['object_id'] for item in results[:limit]])\n\n # Yield each object, score pair. 
Because of the lazy nature of generic\n # relations, missing objects are silently ignored.\n\n for item in results[:limit]:\n id, score = item['object_id'], item['score']\n\n if not score:\n continue\n\n if int(id) in objects:\n yield objects[int(id)], int(score)", "def top(self):\n return self[0]", "def get_best_model_configs(self):\n self.best_models = {}\n with self.database:\n cur = self.database.cursor()\n for model in self.active_models:\n if self.tuning_depth == 'minimal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results\")\n elif self.tuning_depth == 'normal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n elif self.tuning_depth == 'maximal':\n a = cur.execute(\"SELECT MAX(accuracy),unique_id from model_performance_results WHERE model = ?\",\n (model,))\n # TODO not implimented, same as normal\n self.best_models[model] = list(a)[0][0]", "def get_latest_model():\n return get_models()[-1]", "def top(self):\n return self.List_store[len(self.List_store)]", "def top(self) -> Optional[FloatObject]:\n return self.get(\"/Top\", None)", "def get_best_known_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='total_score', desc=False)", "def top(self):\n return self.q1.return_top()", "def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass", "def top(self, **kwargs) -> Dict[str, Any]:", "def select_top_predictions(self, predictions):\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def select_top_predictions(self, predictions):\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def select_top_predictions(self, predictions):\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def get_top_pages(model=None):\n return get_page_children(page=None, model=model)", "def top(self):\n return self._top", "def top(self):\n return self._top", "def get_model_topN_accuracies(self):\n\n accuracy = {}\n filename = self._get_data_filename(\"test_eval.json\")\n\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n accuracy[\"top1\"] = _format_float(100 * (1.0 - float(results[\"average_top1_error\"])))\n accuracy[\"top5\"] = _format_float(100 * (1.0 - float(results[\"average_top5_error\"])))\n\n return accuracy", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def top(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"top\")", "def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None", "def __get_top_with_detail(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productId', 
keep=\"first\")[\n :top]\n\n return result", "def top( self , i , j ):\n return self._get_top( i , j )", "def test_get_top_respects_limit(self):\r\n\r\n ranked_3_app = self.create_app_with_contributors(8, 0, name='three')\r\n ranked_2_app = self.create_app_with_contributors(9, 0, name='two')\r\n ranked_1_app = self.create_app_with_contributors(10, 0, name='one')\r\n ranked_4_app = self.create_app_with_contributors(7, 0, name='four')\r\n\r\n top_apps = cached_apps.get_top(n=2)\r\n\r\n assert len(top_apps) is 2, len(top_apps)", "def solution(model, top_p, top_idx, category_names):\n top_p = top_p[0].cpu().detach().numpy()\n\n top_cats_names = []\n for idx in top_idx[0].cpu().detach().numpy():\n cat = model.idx_to_class[str(idx)]\n name = cat_to_name(cat, category_names)\n top_cats_names.append(name)\n\n return top_p, top_cats_names", "def top(self):\n return self.get_cards()[-1]", "def top(self):\n\n return self._top", "def top_layer(self):\n return self._top", "def top_coded(self):\n qs = self.model_qs().exclude(**{self.foreign_key_id_field_name: None})\n\n fk_id = qs.values(self.foreign_key_id_field_name)\n fk_id = fk_id.annotate(\n counted_fk_field=Count(self.foreign_key_id_field_name)\n )\n fk_id = fk_id.order_by(\"-counted_fk_field\")[:self.TOP_AMOUNT]\n\n return fk_id.values_list(\n \"{}_fk__name\".format(self.field_name), \"counted_fk_field\"\n )", "def top(self) -> object:\n return self._data[-1]", "def top(self) -> object:\n return self._data[-1]", "def get_top_models(basedir, prob_dirs, beta, nM=3):\n # initialize\n topmodels = []\n # process rest of data regions for same models\n for inferdir in sorted(prob_dirs.keys()):\n # set focus directory\n focusdir = os.path.join(basedir, prob_dirs[inferdir])\n # obtain model probabilities\n modelprobs = get_model_probs(focusdir, float(beta))\n \n for em in sorted(modelprobs, key=modelprobs.get, reverse=True)[0:nM]:\n if em not in topmodels:\n # add model if not already in list\n topmodels.append(em)\n \n return topmodels", "def top(self):\n # type: () -> float\n return self._top", "def get_top_predictions(preds, top=5):\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n # result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n # result.sort(key=lambda x: x[2], reverse=True)\n # results.append(result)\n return top_indices", "def load_best_model():\r\n best_model = LSTM_load_best_model()\r\n return best_model", "def top_uncoded(self):\n qs = self.model_qs().exclude(**{self.free_text_field_name: ''})\n top_ft = qs.annotate(\n lower_ft_field=Lower(self.free_text_field_name)\n )\n top_ft = top_ft.values(\"lower_ft_field\")\n top_ft = top_ft.annotate(\n counted_ft_field=Count(\"lower_ft_field\")\n )\n top_ft = top_ft.order_by(\"-counted_ft_field\")[:self.TOP_AMOUNT]\n\n return top_ft.values_list(\"lower_ft_field\", \"counted_ft_field\")", "def load_top_model(architecture, img_size, n_classes, top_weights_path):\r\n # TODO: Use top model weights from fine tuning weights for model initialization\r\n classifier = TransferModel()\r\n classifier.build_base_model(architecture, [img_size, img_size], 3)\r\n classifier.add_top_model(n_classes)\r\n classifier.load_top_weights(top_weights_path)\r\n logger.debug(\"Loaded \" + architecture + \" top model.\")\r\n return classifier", "def get_top_view():\n topView = RedisHelper.get_cache(KEY_TOP_VIEW)\n if RedisHelper.is_cache_exist(KEY_TOP_VIEW) is False:\n topView = list(Comment.objects.raw(SQL_VIEW_TOP))\n RedisHelper.create_cache(KEY_TOP_VIEW, topView, 
RedisTimeOut.REDIS_TIMEOUT_5_MIN)\n return topView", "def recommend_k_items(self, x, k, remove_seen=True):\n # return optimal model\n self.model.load_weights(self.save_path)\n\n # obtain scores\n score = self.model.predict(x)\n\n if remove_seen:\n # if true, it removes items from the train set by setting them to zero\n seen_mask = np.not_equal(x, 0)\n score[seen_mask] = 0\n # get the top k items\n top_items = np.argpartition(-score, range(k), axis=1)[:, :k]\n # get a copy of the score matrix\n score_c = score.copy()\n # set to zero the k elements\n score_c[np.arange(score_c.shape[0])[:, None], top_items] = 0\n # set to zeros all elements other then the k\n top_scores = score - score_c\n return top_scores", "def getTopComments(self):\n return self.topComments", "def get_top(self, Model, limit=10, reversed=False):\r\n ctype = ContentType.objects.get_for_model(Model)\r\n query = \"\"\"\r\n SELECT object_id, SUM(vote) as %s\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n GROUP BY object_id\"\"\" % (\r\n qn('score'),\r\n qn(self.model._meta.db_table),\r\n )\r\n\r\n # MySQL has issues with re-using the aggregate function in the\r\n # HAVING clause, so we alias the score and use this alias for\r\n # its benefit.\r\n if settings.DATABASE_ENGINE == 'mysql':\r\n having_score = qn('score')\r\n else:\r\n having_score = 'SUM(vote)'\r\n if reversed:\r\n having_sql = ' HAVING %(having_score)s < 0 ORDER BY %(having_score)s ASC %(limit_offset)s'\r\n else:\r\n having_sql = ' HAVING %(having_score)s > 0 ORDER BY %(having_score)s DESC %(limit_offset)s'\r\n query += having_sql % {\r\n 'having_score': having_score,\r\n 'limit_offset': connection.ops.limit_offset_sql(limit),\r\n }\r\n\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id])\r\n results = cursor.fetchall()\r\n\r\n # Use in_bulk() to avoid O(limit) db hits.\r\n objects = Model.objects.in_bulk([id for id, score in results])\r\n\r\n # Yield each object, score pair. 
Because of the lazy nature of generic\r\n # relations, missing objects are silently ignored.\r\n for id, score in results:\r\n if id in objects:\r\n yield objects[id], int(score)", "def predict_items(self, users=None, top=None):\n\n # Get shape of the matrix\n num_users, num_items = self.shape\n\n if users is not None:\n num_users = len(users)\n\n # Return top-k recommendations\n if top is not None:\n num_items = top\n\n tempItemsAvg = self.bias['globalAvg'] + self.bias['dItems'][:num_items]\n return np.array([tempItemsAvg,] * num_users)", "def top(self):\n return self.a[-1]", "def get_top(self, Model, limit=10, reversed=False):\n ctype = ContentType.objects.get_for_model(Model)\n query = \"\"\"\n SELECT object_id, SUM(vote) as %s\n FROM %s\n WHERE content_type_id = %%s\n GROUP BY object_id\"\"\" % (\n connection.ops.quote_name('score'),\n connection.ops.quote_name(self.model._meta.db_table),\n )\n\n # MySQL has issues with re-using the aggregate function in the\n # HAVING clause, so we alias the score and use this alias for\n # its benefit.\n if settings.DATABASE_ENGINE == 'mysql':\n having_score = connection.ops.quote_name('score')\n else:\n having_score = 'SUM(vote)'\n if reversed:\n having_sql = ' HAVING %(having_score)s < 0 ORDER BY %(having_score)s ASC LIMIT %%s'\n else:\n having_sql = ' HAVING %(having_score)s > 0 ORDER BY %(having_score)s DESC LIMIT %%s'\n query += having_sql % {\n 'having_score': having_score,\n }\n\n cursor = connection.cursor()\n cursor.execute(query, [ctype.id, limit])\n results = cursor.fetchall()\n\n # Use in_bulk() to avoid O(limit) db hits.\n objects = Model.objects.in_bulk([id for id, score in results])\n\n # Yield each object, score pair. Because of the lazy nature of generic\n # relations, missing objects are silently ignored.\n for id, score in results:\n if id in objects:\n yield objects[id], int(score)", "def top(self) -> int:\n return self.topvalue", "def top(self) -> int:\n return self.topEle", "def top(self):\r\n\r\n if self.is_empty():\r\n pass\r\n\r\n else:\r\n return self._data[0]", "def set_best_model(self):\n if (self.metric == 'bic'):\n self.best_gmm = self.best_gmm_bic\n elif(self.metric == 'aic'):\n self.best_gmm = self.best_gmm_aic", "def get_item_based_topk(self, items, top_k=10, sort_top_k=False):\n\n # convert item ids to indices\n item_ids = items[self.col_item].map(self.item2index)\n\n # if no ratings were provided assume they are all 1\n if self.col_rating in items.columns:\n ratings = items[self.col_rating]\n else:\n ratings = pd.Series(np.ones_like(item_ids))\n\n # create local map of user ids\n if self.col_user in items.columns:\n test_users = items[self.col_user]\n user2index = {x[1]: x[0] for x in enumerate(items[self.col_user].unique())}\n user_ids = test_users.map(user2index)\n else:\n # if no user column exists assume all entries are for a single user\n test_users = pd.Series(np.zeros_like(item_ids))\n user_ids = test_users\n n_users = user_ids.drop_duplicates().shape[0]\n\n # generate pseudo user affinity using seed items\n pseudo_affinity = sparse.coo_matrix(\n (ratings, (user_ids, item_ids)), shape=(n_users, self.n_items)\n ).tocsr()\n\n # calculate raw scores with a matrix multiplication\n test_scores = pseudo_affinity.dot(self.item_similarity)\n\n # remove items in the seed set so recommended items are novel\n test_scores[user_ids, item_ids] = -np.inf\n\n top_items, top_scores = get_top_k_scored_items(scores=test_scores, top_k=top_k, sort_top_k=sort_top_k)\n\n df = pd.DataFrame(\n {\n self.col_user: 
np.repeat(test_users.drop_duplicates().values, top_items.shape[1]),\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()", "def top(self):\n try:\n if self.size() > 0:\n return self.items[len(self.items) - 1]\n else:\n raise IndexError('Cannot get top item, stack is empty.')\n except IndexError as err:\n print(err)\n raise", "def test_get_top_doesnt_return_hidden_apps(self):\r\n\r\n ranked_3_app = self.create_app_with_contributors(8, 0, name='three')\r\n ranked_2_app = self.create_app_with_contributors(9, 0, name='two')\r\n ranked_1_app = self.create_app_with_contributors(10, 0, name='one')\r\n hidden_app = self.create_app_with_contributors(11, 0, name='hidden')\r\n hidden_app.hidden = 1\r\n db.session.add(hidden_app)\r\n db.session.commit()\r\n\r\n top_apps = cached_apps.get_top()\r\n\r\n assert len(top_apps) is 3, len(top_apps)\r\n for app in top_apps:\r\n assert app['name'] != 'hidden', app['name']", "def load_best_model(self) -> None:\n self.resume()", "def top(self) -> int:\n return self.q[0]", "def top(self) -> int:\n return self.q[0]", "def top(self) -> int:\n return self.q[0]", "def top(self):\n \n assert not self.empty()\n return self._s[-1]", "def get_top_topics(self, model_name, data):\n\n if model_name == 'lda':\n return list(self.lda_model.top_topics(data))\n elif model_name == 'lsa':\n return list(self.lsa_model.print_topics(num_topics= 10))", "def __get_top(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productid', keep=\"first\")\n print(result)\n result = result[:top].sort_values(by=\"final_score\", ascending=False).productid\n\n return list(result)", "def get_top():\n print(\"This processes are using the cpu the most:\")\n print(os.system(\"ps axo %cpu,pid,euser,cmd | sort -nr | head -n 5\"))", "def test_most_similar_topn(self):\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)\n\n predicted = self.vectors.most_similar('dog.n.01', topn=None)\n self.assertEqual(len(predicted), len(self.vectors.vocab) - 1)\n self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')", "def model_selection_statistics(self):\n return None", "def get_n_best(self):\n pass", "def test_get_top_returns_apps_with_most_taskruns(self):\r\n\r\n ranked_3_app = self.create_app_with_contributors(8, 0, name='three')\r\n ranked_2_app = self.create_app_with_contributors(9, 0, name='two')\r\n ranked_1_app = self.create_app_with_contributors(10, 0, name='one')\r\n ranked_4_app = self.create_app_with_contributors(7, 0, name='four')\r\n\r\n top_apps = cached_apps.get_top()\r\n\r\n assert top_apps[0]['name'] == 'one', top_apps\r\n assert top_apps[1]['name'] == 'two', top_apps\r\n assert top_apps[2]['name'] == 'three', top_apps\r\n assert top_apps[3]['name'] == 'four', top_apps", "def get_top(self):\n\n sql = \"SELECT username FROM Users ORDER BY score DESC LIMIT 10\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()", "def getTopPreps(self) -> list:\n top_prep_list= []\n for x in self._top_preps:\n top_prep_list.append(x)\n return top_prep_list", "def get_all_top(self):\n return self.execute(TABELLE['top']['select']['all'])", "def top(self, value):\n\n pass", "def top_boys(self):\n return [boy for boy in self._db.boys.find().sort('rating', pymongo.DESCENDING).limit(5)]", "def 
query_top_recommended(cls,N=10):\n brkey = 'BooksMostRecommended'\n bks = from_cache(brkey)\n if not bks:\n bks = map(lambda e:str(e.id()), SuiBook.all(keys_only=True).order('-recommends').fetch(N))\n to_cache(brkey,bks)\n return bks", "def get_top(self, difficulty):\n return list(self.database[difficulty].find().sort('time').limit(MAX_LEADERS))", "def top(self):\n if self.is_empty():\n raise RuntimeError(\"Attempt to get a top of the empty stack!\")\n return self._items[-1]", "def get_best(self):\n scores, ids = self.sort_best()\n return scores[1], ids[1]", "def showBestBetUse(self) :\n bestBetUse = 0\n for level in self.level_history :\n bestBetUse = level.bet if bestBetUse < level.bet else bestBetUse\n Scenario.messageGetBestBetUse(bestBetUse)", "def findBestModel(X_train, X_test, Y_test, model='iForest'):\n if model == 'iForest':\n total_score = 0;\n parameters = [0,0,0,0]\n for max_features in range(1,X_train.shape[1]+1):\n for contamination in range(1,101):\n iForest = IsolationForest(n_estimators = 100, max_features = max_features, contamination = contamination/1000, random_state = 0).fit(X_train)\n \n scores = []\n for x_test,y_test in zip(X_test,Y_test):\n y_hat = iForest.predict(x_test)\n score = evaluate(y_test,y_hat) # returns similarity percentage\n scores.append(score)\n \n if sum(scores) > total_score:\n total_score = sum(scores)\n parameters[0] = max_features\n parameters[1] = contamination/1000\n parameters[2] = total_score\n parameters[3] = scores\n print(parameters, contamination)\n \n return parameters", "def peek(self):\n mostest_importantestest = self.dict[min(self.dict.keys())][0]\n return mostest_importantestest", "def get_best_individual(self):\n return self._best_indv", "def best_B(Ag):\n top = 0\n for i in range(len(Ag)):\n etop = np.min(cf.TD20[int(Ag[i]) - 1])\n top += etop\n return top", "def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)", "def top(self):\n return self.arr[-1]", "def get_best_model(self, d_model_info, metric='F1', delta_auc_th=0.03, verbose=False):\n # select valid models (abs(auc_train - auc_test)<0.03)\n valid_model = {}\n for key, param in d_model_info.items():\n if param['metrics']['delta_auc'] <= delta_auc_th:\n valid_model[key] = param\n\n # Best model according to selected metric\n if len(valid_model.keys()) > 0:\n best_model_idx = max(valid_model, key=lambda x: valid_model[x].get('metrics').get(metric))\n if verbose:\n print(' >', len(valid_model.keys()), ' valid models |auc(train)-auc(test)|<=' + str(delta_auc_th))\n print(' > best model : ' + str(best_model_idx))\n else:\n best_model_idx = None\n print('0 valid model')\n\n return best_model_idx, list(valid_model.keys())", "def get_best_for_models(results, models, w_ft=-0.25, w_mt=-10, w_f1=3.5, f1_type='macro', is_test=False):\n best_results = []\n\n for model in models:\n model_results = metrics.filter_results(results, models=[model], is_test=is_test)\n __sort_by_score(model_results, w_ft, w_mt, w_f1, f1_type)\n\n best_results.append(model_results[0])\n\n return best_results", "def _load_best_model(self) -> None:\n self.trainer.resume()", "def train_best_model():\r\n best_model = LSTM_train_best_model()\r\n return best_model", "def get_optimums(self, model, size, excludes):\n raise NotImplementedError", "def __findBestLogProbability(self):\n best_model = None\n highest_log_probability = -sys.maxsize# (np.finfo(float).eps)\n\n # Find the highest model\n for item in self.data_array:\n if item[1] > highest_log_probability:\n 
best_model = item\n highest_log_probability = item[1]\n\n return best_model", "def getHighestRank_Toilet(self):\n\n # filter out low confidences\n #maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n #p = [p for p in self.Predictors if p.confidence == maxConfidence]\n \n \n p = self.Predictors\n \n if len(p) == 1:\n # only one predictor has high confidence\n chosenPredictor = p[0]\n elif len(p) > 1:\n random.shuffle(p, random = rps.randomRange)\n \n # drop the first 37% and grab the best \n drop = round(len(p) * 0.37) - 1\n initial = p[:drop]\n maxConfidence = max(initial, key=operator.attrgetter('confidence'))\n maxConfidence = maxConfidence.confidence\n \n toCheck = p[drop:]\n for p in toCheck:\n if p.confidence >= maxConfidence:\n chosenPredictor = p\n break\n else:\n chosenPredictor = toCheck[-1]\n \n rankConfidence = chosenPredictor.confidence\n return chosenPredictor, rankConfidence", "def suggested_search(search_text):\n threshold = 0.6\n global model\n\n search_text = remove_stop_words(search_text)\n tmp_search = search_text.split()\n\n new_search = []\n for word in tmp_search:\n similar_words = get_similar_words(model, word)\n new_search = select_top_words(similar_words, new_search, threshold)\n\n new_search = list(set(new_search))\n new_search = ' '.join(new_search)\n\n return new_search + ' ' + search_text", "def train_top_model(architecture, img_size, batch_size, best_top_weights_path, top_epochs_arr, top_learn_rates,\r\n preprocessor, separate_top_model=False):\r\n # Create a checkpoint, for best model weights\r\n checkpoint_top = ModelCheckpoint(best_top_weights_path, monitor='val_loss', verbose=1, save_best_only=True)\r\n\r\n x_train, y_train = preprocessor.X_train, preprocessor.y_train\r\n x_valid, y_valid = preprocessor.X_val, preprocessor.y_val\r\n train_generator = preprocessor.get_train_generator(batch_size)\r\n steps = len(x_train) / batch_size\r\n\r\n img_resize = (img_size, img_size)\r\n\r\n # Define and Train model\r\n n_classes = len(preprocessor.y_map)\r\n\r\n logger.info(\"Training dense top model.\")\r\n classifier = TransferModel(separate_top_model)\r\n logger.debug(\"Classifier initialized.\")\r\n classifier.build_base_model(architecture, img_resize, 3)\r\n logger.info(\"Base model \" + architecture + \" built.\")\r\n classifier.add_top_model(n_classes)\r\n\r\n if separate_top_model:\r\n # Use separate model structure to run base_model only once\r\n logger.info(\"Calculating bottleneck features for separate top model.\")\r\n bottleneck_feat_trn = classifier.predict_bottleneck_features_gen(train_generator, steps)\r\n bottleneck_feat_val = classifier.predict_bottleneck_features(x_valid)\r\n logger.debug(\"y_train_gold shape: {}\".format(np.array(y_train).shape))\r\n logger.debug(\"y_valid shape: {}\".format(np.array(y_valid).shape))\r\n logger.debug(\"bottleneck_feat_trn shape: {}\".format(bottleneck_feat_trn.shape))\r\n logger.debug(\"bottleneck_feat_val shape: {}\".format(bottleneck_feat_val.shape))\r\n logger.debug(\"Expected bottleneck shape: {}\".format(classifier.model.output_shape[1:]))\r\n logger.info(\"Top built, bottleneck features calculated, ready to train.\")\r\n else:\r\n classifier.disable_base_model_training()\r\n logger.info(\"Top built, ready to train.\")\r\n\r\n train_losses, val_losses = [], []\r\n start = time.time()\r\n for learn_rate, epochs in zip(top_learn_rates, top_epochs_arr):\r\n if separate_top_model:\r\n tmp_train_losses, tmp_val_losses, fbeta_score = classifier.train_top_model(\r\n 
bottleneck_feat_trn=bottleneck_feat_trn,\r\n y_train=np.array(y_train),\r\n bottleneck_feat_val=bottleneck_feat_val,\r\n y_valid=y_valid,\r\n learn_rate=learn_rate,\r\n epoch=epochs,\r\n batch_size=batch_size,\r\n train_callbacks=[checkpoint_top]\r\n )\r\n else:\r\n tmp_train_losses, tmp_val_losses, fbeta_score = classifier.train_gen(\r\n train_generator=train_generator,\r\n steps=steps,\r\n x_valid=x_valid,\r\n y_valid=y_valid,\r\n learn_rate=learn_rate,\r\n epochs=epochs,\r\n train_callbacks=[checkpoint_top]\r\n )\r\n train_losses += tmp_train_losses\r\n val_losses += tmp_val_losses\r\n\r\n logger.info(\"learn_rate : \" + str(learn_rate))\r\n logger.info(\"epochs : \" + str(epochs))\r\n logger.info(\"fbeta_score : \" + str(fbeta_score))\r\n logger.info(\"classification_threshold : \" + str(classifier.classification_threshold))\r\n\r\n end = time.time()\r\n t_epoch = float(end - start) / sum(top_epochs_arr)\r\n logger.info(\"Training time [s/epoch]: \" + str(t_epoch))\r\n # Load Best Weights saved by ModelCheckpoint\r\n if separate_top_model:\r\n classifier.load_top_weights(best_top_weights_path)\r\n else:\r\n classifier.load_weights(best_top_weights_path)\r\n # Look at our fbeta_score\r\n fbeta_score = classifier._get_fbeta_score(classifier, x_valid, y_valid)\r\n logger.info(\"Best top model F2: \" + str(fbeta_score))\r\n\r\n # Store losses\r\n np.save(\"top_train_losses.npy\", train_losses)\r\n np.save(\"top_tval_losses.npy\", val_losses)\r\n # Plot losses\r\n plt.plot(train_losses, label='Training loss')\r\n plt.plot(val_losses, label='Validation loss')\r\n plt.legend()\r\n plt.savefig('top_loss.png')\r\n\r\n return classifier", "def predict_currword(word, top_n=10):\r\n try:\r\n return [\r\n (k, v) for k, v in model.WORDS_MODEL.most_common() if k.startswith(word)\r\n ][:top_n]\r\n except KeyError:\r\n raise Exception(\r\n \"Please load predictive models. Run:\\\r\n \\n\\tautocomplete.load()\"\r\n )", "def get_top_rating_service(top, offset, uid):\n if top is None or not top.isdigit():\n top = DEFAULT_TOP\n if offset is None or not offset.isdigit():\n offset = DEFAULT_OFFSET\n return update_protocol_for_a_list_of_dict('image', rating_dao.get_top_rating_dao(int(top), int(offset)), uid)", "def GetPts(self):\n return self.best", "def select_top_predictions(predictions, confidence_threshold=0.7, score_field=\"scores\"):\n scores = predictions.get_field(score_field)\n keep = torch.nonzero(scores > confidence_threshold).squeeze(1)\n if len(keep) == 0:\n return []\n predictions = predictions[keep]\n scores = predictions.get_field(score_field)\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]", "def least_popular_influencers(self, influencerTopSim, count):\n infPopularity = {influencer: 0 for influencer in influencerTopSim}\n for influencer in influencerTopSim:\n infTweetPop = self.userTweetsStat[influencer]\n avgPop = []\n for tweet in influencerTopSim[influencer]:\n infTweet = infTweetPop[len(infTweetPop)-1]\n avgPop.append(self.assign_popularity_to_tweet(infTweet,tweet))\n infPopularity[influencer] = np.mean(avgPop)\n \n tmp = {key: rank for rank, key in enumerate(sorted(set(infPopularity.values()), reverse=True), 1)}\n rankInfluencer = {k: tmp[v] for k,v in infPopularity.items()}\n leastPopInfluencer = [a for a in dict(sorted(rankInfluencer.items(), key=operator.itemgetter(1), reverse=True)[:count]).keys()]\n \n return leastPopInfluencer" ]
[ "0.6821835", "0.6430581", "0.629004", "0.62063706", "0.6155155", "0.6126578", "0.5943312", "0.59303916", "0.59013474", "0.5836023", "0.5798655", "0.5795692", "0.56650555", "0.565281", "0.56480753", "0.5645917", "0.56216484", "0.55647224", "0.55647224", "0.55647224", "0.55510247", "0.55452687", "0.55452687", "0.5527156", "0.5524048", "0.5524048", "0.5524048", "0.54983", "0.5487157", "0.5482859", "0.54766655", "0.5469887", "0.5458636", "0.5445851", "0.5419991", "0.5419329", "0.5408097", "0.5408097", "0.54059535", "0.53959155", "0.5391013", "0.53890437", "0.5385969", "0.5377933", "0.53754467", "0.53698003", "0.5367454", "0.5364083", "0.5339799", "0.53343177", "0.53235024", "0.53139436", "0.53123206", "0.5305618", "0.5305193", "0.52964395", "0.5295991", "0.527701", "0.5276212", "0.5264648", "0.5264648", "0.5264648", "0.52642834", "0.5256064", "0.52501285", "0.52469367", "0.5243567", "0.52364516", "0.523414", "0.5225277", "0.5218481", "0.5215991", "0.5210914", "0.5209269", "0.520764", "0.5199624", "0.51920766", "0.51901", "0.51829314", "0.5179633", "0.51785994", "0.51779085", "0.5177806", "0.51775694", "0.51563084", "0.51563084", "0.5154894", "0.51472616", "0.5145002", "0.5127489", "0.5125719", "0.5120824", "0.5115615", "0.5114411", "0.5111299", "0.5107282", "0.51021314", "0.5099838", "0.5098546", "0.50898683" ]
0.8301511
0
Gets the current boost_on_errors
def _get_boost_on_errors(self): self._validate_boost_on_errors() if self.boost_on_errors == "auto": val = self._get_validation_strategy() if val.get("validation_type", "") == "custom": return False if self._get_mode() == "Explain": return False if self._get_mode() == "Perform": return False if self._get_mode() == "Compete": return True if self._get_mode() == "Optuna": return False else: return deepcopy(self.boost_on_errors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_raise_on_error():\n global raise_on_error\n return raise_on_error", "def back_err(self):\n return self._derived_properties[\"bkgd_err\"]", "def get_errors(self):\n return {'loss': self.loss.data[0]}", "def getB(self):\n return self.error", "def getErrorHandler(self):\n pass", "def check_errors(self) -> None:\n # TODO check the manual for error codes & interpert them.\n return self.send(self.cmd.GET_GLOBALSTATUS_CURRENTERROR)", "def errors(self):\n return self.__errors", "def get_error_log(self) -> Any:\n return self.err", "def getErrors(self):\n return self.errors", "def error(self):\n return self['error']", "def failed_on(self):\n return self._failed_on", "def errors(self):\n return self._errors", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def error(self):\n return self._error", "def error(self):\n return self._error", "def error(self):\n return self._error", "def errorbars (self):\n return self._errorbars", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def get_error(self):\n return self.exc_info", "def getErrorLog(self):\n return _libsbml.SBMLValidator_getErrorLog(self)", "def validation_errors(self):\n return self._validation_errors", "def errors():\n return THE_LOGGER.errors", "def error(self) -> list:\n return self.__err", "def error(self):\n return self.get('error')", "def Errors(self):\n return self._get_attribute('errors')", "def on_failure(self):\n return self._on_failure", "def errors(self):\n raise NotImplementedError", "def geterr():\n return __errprof.state.copy()", "def getErrorsList(self):\n return self.__errors", "def df_err(self):\n return self._df_err", "def getErrorLog(self):\n return _libsbml.XMLInputStream_getErrorLog(self)", "def error_map(self):\n return self._error_map", "def errors(self):\n return self.args[1]", "def errors(self):\n return self._properties.get(\"errors\")", "def _mn_get_errdef_ ( self ) :\n return _mn_stat_ ( self ) ['ERRDEF']", "def get_error(self):\n return self.e", "def xerr(self, i):\n return self.errors[0][i]", "def get_encoding_errors(self):\n return self._encoding_errors", "def bisect_batch_on_function_error(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"bisect_batch_on_function_error\")", "def error(self):\n return self._decorator_wrapper(EventName.error)", "def getErrorHandler(self):\n return libxml2mod.xmlParserCtxtGetErrorHandler(self._o)", "def std_err(self):\n return self._std_err", "def getExceptions(self):\n return self.getOrDefault(\"exceptions\")", "def has_error(self):\n return self.gripper_io.get_signal_value(\"has_error\")", "def getBuildErrors(self):\n return [x for x in self.xeps if x.buildErrors]", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def retrieve_error_messages(self):\n return self.errors_seen[:]", "def has_error(self):\n return self.error_found", "def get_beam_errors(self):\n\n lsize = 2 * self.lmax + 1\n nspec = 6 if self.pol else 1\n beam_shape = (self.num_maps * nspec, lsize)\n\n save_name = \"beam_errors\"\n cp = \"beam_errors\"\n\n if hasattr(self, \"beam_errors\") and not self.force_rerun[cp]:\n return self.beam_errors\n\n ret = self.load_data(\n save_name, cp, fields=[\"beam_errors\"], to_attrs=True, shape=beam_shape\n )\n if ret is not None:\n return ret[\"beam_errors\"]\n\n beam_errors = OrderedDict()\n beam_errors[\"tt\"] = 
OrderedDict()\n if self.pol:\n for s in [\"ee\", \"bb\", \"te\", \"eb\", \"tb\"]:\n beam_errors[s] = OrderedDict()\n\n for tag, otag in zip(self.map_tags, self.map_tags_orig):\n if otag in self.beam_error_product:\n be = self.beam_error_product[otag]\n elif otag in self.fwhm_err:\n # convert error on the FWHM to an envelope error on the beam window\n fwhm = self.fwhm[otag]\n bl = self.beam_windows[\"tt\"][tag]\n blp = hp.gauss_beam(\n fwhm * (1 - self.fwhm_err[otag]), lsize - 1, self.pol\n )\n blm = hp.gauss_beam(\n fwhm * (1 + self.fwhm_err[otag]), lsize - 1, self.pol\n )\n if self.pol:\n bl = np.asarray(\n [bl, self.beam_windows[\"ee\"][tag], self.beam_windows[\"te\"][tag]]\n )\n blp = blp.T[[0, 1, 3]]\n blm = blm.T[[0, 1, 3]]\n be = (blp - blm) / 2.0 / bl\n else:\n raise ValueError(\"No beam in config for {}\".format(otag))\n\n be = np.atleast_2d(be)[:, :lsize]\n beam_errors[\"tt\"][tag] = np.copy(be[0])\n if self.pol:\n for s in [\"ee\", \"bb\", \"eb\"]:\n beam_errors[s][tag] = np.copy(be[1])\n for s in [\"te\", \"tb\"]:\n beam_errors[s][tag] = np.copy(be[2])\n\n # save and return\n self.beam_errors = beam_errors\n self.save_data(save_name, from_attrs=[\"beam_errors\"])\n return self.beam_errors", "def bisect_batch_on_function_error(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"bisect_batch_on_function_error\")", "def getNumErrors(self):\n return _libsbml.XMLErrorLog_getNumErrors(self)", "def error(self):\n ...", "def refined_errors(self):\r\n errs = []\r\n for err in self.errors:\r\n if err['typo'].lower() not in self.terms:\r\n errs.append(err)\r\n return errs", "def get_error(self) -> List[str]:\n return []", "def get_error(self) -> List[str]:\n return []", "def err(self):\n return self._err.getvalue()", "def query_error(self):\n return self.details[KEY_QUERY_ERROR]", "def error(self):\n pass", "def in_crc_errors(self) -> str:\n return self._in_crc_errors", "def error_handler(self):\n return self._error_handler", "def errors(self) -> List[Error]:", "def traceback(self):\n return self._traceback", "def check_get_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def getParseErrors(self):\n return [x for x in self.xeps if x.parseErrors]", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n 
errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "def error_body(self):\n return self._status.error_body", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def error(self):\n error = self._wrapped.error\n if error:\n return error\n\n return self.json['response'].get('error')", "def last_error_status(self) -> 'outputs.StatusResponse':\n return pulumi.get(self, \"last_error_status\")", "def get_error_handler(self):\n\t\treturn self.error_handler", "def _getErrorNodes(self):\n return self._errorNodes", "def error(self) -> Optional[pulumi.Input['ErrorArgs']]:\n return pulumi.get(self, \"error\")", "def error(self) -> Optional[pulumi.Input['ErrorArgs']]:\n return pulumi.get(self, \"error\")", "def getErrors(self) -> java.util.Collection:\n ...", "def GetDrawErrorband(self):\n return self._drawerrorband", "def getError(self):\n \n return self.resp[\"error\"]", "def do_get_error(self):\n if self._last_exception is None:\n print('no errors')\n else:\n traceback.print_exception(*self._last_exception)", "def error(self) -> 'outputs.StatusResponse':\n return pulumi.get(self, \"error\")", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def get_bio_gripper_error(self):\r\n return self._arm.get_bio_gripper_error()", "def stderr(self):\n return self.__stderr", "def getLastError(self):\n errors = self.getErrorsList()\n if (len(errors) > 0):\n return errors[len(errors) - 1]\n return None;", "def GetAll(self):\n return self._errors.copy()", "def error_type(self):\n return self.__class__.__name__", "def get_errors(self):\n df = self.get_events()\n return df[df.error.notnull()]", "def ERR(self):", "def error_entity(self):\n return self._error_entity", "def f1QRB_std_errs(self) -> Dict[int, Optional[float]]:\n return {qs.id: qs.f1QRB_std_err for qs in self.qubits_specs}", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret", "def yerr(self, i):\n return self.errors[1][i]", "def get_error_type(self):\n\n return self.err_type", "def get_error_file(self):\n pass", "def get_error_page(self, loadbalancer):\n return loadbalancer.get_error_page()", "def errorCheck(self, refresh_error = True):\n error = self.error_status\n if refresh_error:\n self.error_status = Modem.ErrorDict.NONE\n return error", "def get_warnings(self):\n pass", "def errorResponse(self):\n return self._errorResponse" ]
[ "0.6346581", "0.6290803", "0.61281234", "0.60806453", "0.59473616", "0.5876309", "0.585333", "0.5807219", "0.5801431", "0.5784405", "0.57784134", "0.576894", "0.57465225", "0.57465225", "0.57174766", "0.5715621", "0.5715621", "0.5715621", "0.5680192", "0.56616455", "0.5624751", "0.5611684", "0.55759263", "0.55685544", "0.55388784", "0.5533462", "0.55222183", "0.55069", "0.54646885", "0.54543144", "0.5433304", "0.5421581", "0.541348", "0.54028904", "0.53973794", "0.53970057", "0.53838044", "0.5377327", "0.5348079", "0.53470355", "0.53419185", "0.53316826", "0.5298869", "0.52970487", "0.5296435", "0.52948755", "0.52830386", "0.52663124", "0.5251709", "0.5241118", "0.5236176", "0.5216344", "0.5197576", "0.5196383", "0.5189985", "0.51825225", "0.51825225", "0.5176365", "0.51679474", "0.5167729", "0.5155675", "0.51512164", "0.51473", "0.5143277", "0.5116535", "0.5110181", "0.5103108", "0.51029843", "0.50972885", "0.509674", "0.50925875", "0.50924104", "0.5088518", "0.5078647", "0.5078647", "0.50737834", "0.5071398", "0.50712496", "0.5056509", "0.5055245", "0.50528055", "0.50440854", "0.50376064", "0.50268453", "0.50241494", "0.50233585", "0.5015277", "0.49976334", "0.49856028", "0.49734733", "0.49724647", "0.49698672", "0.49698672", "0.4967706", "0.49543723", "0.49453098", "0.49305668", "0.4924635", "0.49245867", "0.4923156" ]
0.7447873
0
Gets the current kmeans_features
def _get_kmeans_features(self): self._validate_kmeans_features() if self.kmeans_features == "auto": if self._get_mode() == "Explain": return False if self._get_mode() == "Perform": return False if self._get_mode() == "Compete": return True if self._get_mode() == "Optuna": return False else: return deepcopy(self.kmeans_features)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_features(self):\n return self._features", "def features(self):\n return self._features", "def features(self):\n\n return self._features", "def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}", "def findK_centroids_average(self, features, clusters):\n\n class InnerFeatures:\n def __init__(self, kps, des, pos):\n self.kps = kps\n self.des = des\n self.pos = pos\n\n kmeans = KMeans(n_clusters=clusters)\n\n pts = np.array(features.pos)\n kps = np.array(features.kps)\n des = np.array(features.des)\n\n kmeans.fit(pts)\n m_clusters = np.array(kmeans.labels_.tolist())\n centers = np.array(kmeans.cluster_centers_)\n\n # KeyPoint(x,y,size) -required\n\n final_kps = []\n final_des = []\n final_pts = []\n\n for cluster in range(clusters):\n indices = np.where(m_clusters == cluster)\n cluster_kps_size = np.mean(np.array([x.size for x in kps[indices]]))\n cluster_des = des[indices]\n\n average_des = np.mean(cluster_des, axis=0)\n cluster_kps = cv2.KeyPoint(x=centers[cluster][0], y=centers[cluster][1], _size=cluster_kps_size)\n\n final_kps.append(cluster_kps)\n final_des.append(average_des)\n final_pts.append([centers[cluster][0], centers[cluster][1]])\n\n final_pts = np.array(final_pts)\n final_des = np.array(final_des)\n final_kps = np.array(final_kps)\n\n result = InnerFeatures(kps=final_kps, des=final_des, pos=final_pts)\n return result", "def features(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:\n return self._features", "def cluster_feature(feature_mat, k):\n whitened = whiten(feature_mat.transpose())\n centroid, distortion = kmeans(whitened, k)\n\n return centroid, distortion", "def get_features(self):\n return []", "def get_features(self):\n if not self.exposes_features:\n return None\n\n return self._last_features", "def get_cat3_features(self):\n return self.category3_features", "def features(self) -> Optional[pulumi.Input['ProvisionedClustersCommonPropertiesFeaturesArgs']]:\n return pulumi.get(self, \"features\")", "def matrix_features(self):\n return self._matrix_features", "def cluster_features(self):\n logger.info('Creating term-document matrix...')\n self._create_tdm()\n init_centroids = self.centroids_from_categories()\n\n # Cluster the features using specific centroids.\n logger.info('Clustering features...')\n self.kmeans = KMeans(init=init_centroids, n_init=1, max_iter=1, n_clusters=len(self.feature_categories))\n self.clusters = self.kmeans.fit_predict(self.tdm)\n\n # The feature vector maps key features (categories) to other features that occur in the same cluster.\n logger.info('Converting clusters to feature vectors...')\n feature_vectors = self.clusters_to_feature_vectors(category_features=list(self.feature_amenity_map.keys()))\n\n return feature_vectors", "def feature_matrix(self):\n return self._feat_matrix", "def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = 
KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans", "def __kmeans(self, points):\n # Prepare initial centers using K-Means++ method.\n initial_centers = kmeans_plusplus_initializer(points, 10).initialize()\n # Create instance of K-Means algorithm with prepared centers.\n self.__kmeans_instance = kmeans(sample, initial_centers)\n # Run cluster analysis and obtain results.\n kmeans_instance.process()\n kclusters = kmeans_instance.get_clusters()\n kcenters = kmeans_instance.get_centers()\n return kclusters, kcenters", "def features(self):\n return self.shape[2]", "def get_feature_labels(self):\n return self.feature_labels", "def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels", "def get_other_features(self):\n return self.other_features", "def get_cat1_features(self):\n return self.category1_features", "def kmeans(points,n_clusters):\n # create kmeans object\n kmeans = KMeans(n_clusters=n_clusters)\n # fit kmeans object to data\n kmeans.fit(points)\n # print location of clusters learned by kmeans object\n print(kmeans.cluster_centers_)\n # save new clusters for chart\n y_km = kmeans.fit_predict(points)\n\n print('Clusters partition: ', Counter(y_km))\n \n return y_km, kmeans", "def kmeans(k, descriptor_list):\r\n kmeans = KMeans(n_clusters = k, n_init=10, verbose = 1) \r\n kmeans.fit(descriptor_list)\r\n visual_words = kmeans.cluster_centers_ \r\n return visual_words", "def get_clusters(self):\r\n\r\n return self.__clusters", "def features(self) -> List[np.ndarray]:\n return None", "def read_features(self):\r\n def unpack_keypoint(data):\r\n try:\r\n kpts = data['keypoints']\r\n desc = data['descriptors']\r\n keypoints = [cv.KeyPoint(x, y, _size, _angle, _response, int(_octave), int(_class_id))\r\n for x, y, _size, _angle, _response, _octave, _class_id in list(kpts)]\r\n return keypoints, np.array(desc)\r\n except(IndexError):\r\n return np.array([]), np.array([])\r\n try:\r\n data = np.load(self.features_path + self.id + \".npz\")\r\n self.keypoints, self.descriptors = unpack_keypoint(data)\r\n logging.info(f\"Existing features for {self.name} found in features directory.\")\r\n except FileNotFoundError:\r\n logging.info(f\"Features for {self.name} not found in {self.features_path}.\")", "def get_local_features(self, img):\n kp, des = self.fe.detectAndCompute(img, None)\n return kp, des", "def get_centroids(self) -> Dict[str, np.ndarray]:\n assert self._centroids != {}\n return self._centroids", "def features(self) -> List[Feature]:\n return self._features", "def findK_centroids_closest(self, features, clusters):\n\n class InnerFeatures:\n def __init__(self, kps, des, pos):\n self.kps = kps\n self.des = des\n self.pos = pos\n\n kmeans = KMeans(n_clusters=clusters)\n\n pts = np.array(features.pos)\n kps = np.array(features.kps)\n des = np.array(features.des)\n\n kmeans.fit(pts)\n m_clusters = kmeans.labels_.tolist()\n centers = np.array(kmeans.cluster_centers_)\n\n closest, _ = pairwise_distances_argmin_min(kmeans.cluster_centers_, pts)\n\n assert len(set(closest)) == clusters\n\n result = InnerFeatures(kps[closest], des[closest], pts[closest])\n return result", "def get_cat2_features(self):\n return 
self.category2_features", "def clusters(self):\n\t\tif self._record is None:\n\t\t return []\n\t\tclusters = [i for i in self._record.features if i.type == 'cluster']\n\t\treturn clusters", "def get_clusters(self):\n return self._clusters", "def get_style1_features(self):\n return self.style1_features", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterISS3_GetFeatureMeans(self)", "def get_who_features(self):\n return self.who_made_features", "def features(self):\n if not self.features_changed:\n return self.__features\n\n f = ()\n # distance features\n for i, d in enumerate(self.dist_features):\n if i % 2 == 1 and ARGS.diag_dists:\n # extract total distance for diagonal features\n try:\n f += (d[2], )\n except TypeError:\n pass\n elif i % 2 == 0 and ARGS.hv_dists:\n f += (d, )\n\n if ARGS.direc_dist:\n f += (np.linalg.norm(self.direc_dist), )\n\n # speed features\n if ARGS.speed:\n f += self.speed_features\n\n # current ball_position\n if ARGS.ballpos:\n f += self.ball_pos_stamps[-1][0]\n\n # checkpoints\n for i in ARGS.cp_ids:\n dx = self.checkpoints[(self.current_section_id + i) % self.num_cps][0] - self.ball_pos_stamps[-1][0][0]\n dy = self.checkpoints[(self.current_section_id + i) % self.num_cps][1] - self.ball_pos_stamps[-1][0][1]\n if False: # maybe add a variable later\n norm = sqrt(dx ** 2 + dy ** 2)\n if norm > 0:\n dx = dx / norm\n dy = dy / norm\n f += (dx, dy)\n\n self.__features = f\n self.features_changed = False\n return f", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIF3_GetFeatureMeans(self)", "def clusters(self) -> ndarray:\n return self._clusters", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterISS2_GetFeatureMeans(self)", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIF2_GetFeatureMeans(self)", "def getFittedKMeansModel ( features, nbClusters, nbInitialisations = 1000, maxNbIterations=10000):\r\n\r\n\tkMeansModel=KMeans(n_clusters=nbClusters, n_init=nbInitialisations, max_iter=maxNbIterations, n_jobs=-1)\r\n\tkMeansModel.fit(features)\r\n\treturn kMeansModel", "def get_categorical_features(self):\n return self.categorical_features", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if 
(float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def get_material_features(self):\n return self.material_features", "def get_clusters(self):\n\n return self.__clusters", "def feature(self):\n return self._feature", "def feature(self):\n return self._feature", "def getFeatureDicts(self):\n pass", "def kmeans(img, k):\n # Randomly pick k pixels as initial cluster \"means\"\n # Random indices are picked without replacement; to avoid duplicate means\n n = len(img) \n rand_ind = np.random.choice(n, size=k, replace=False) \n means = img[rand_ind, :].astype(np.float32) \n\n print \"Using Kmeans..\"\n return kmeans_driver(img, means)", "def get_style2_features(self):\n return self.style2_features", "def run_k_means(self):\r\n centroids = self.centroids\r\n\r\n for i in range(self.max_iters):\r\n self.closestcentroids()\r\n self.newcentroids()\r\n\r\n J = 0\r\n X = self.x\r\n m = len(X)\r\n idx = self.index\r\n K = self.K\r\n dim = X.shape[1]\r\n\r\n for num in range(K):\r\n # find the index of all entries where idx==n\r\n indexentries = np.nonzero(idx == num)[0]\r\n # the values in X that have the index in indesxentries\r\n values = X[indexentries]\r\n # using one of the K centroids to do the calculation. 
K<=2 doesn't\r\n # work here for some reason.\r\n centroid = centroids[num, 0]\r\n J += np.sum((values - centroid) ** 2)\r\n\r\n return [centroids.reshape((1, K, dim)), [X[idx == k].size for k in range(K)], J / m]", "def get_clusters(self,points):\n self.points = points\n self.__dabest = [self.__cmeans(points,i) for i in range(self.__start,self.__end)]\n ##self.hull = \n return self.__dabest", "def get_means(self):\n if self.metadata is None:\n self.get_metadata()\n\n # we want only the numerical features\n df = self.metadata.select_dtypes(include=['int64', 'float64'])\n return df.mean()", "def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()", "def special_features(self):\r\n return self._special_features", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUS3_GetFeatureMeans(self)", "def cluster_kmeans(self, data, n_clusters):\n km = cl.KMeans(n_clusters)\n kmf = km.fit(data)\n\n labels = kmf.labels_\n\n return labels, [np.nan]", "def get_feature_names(self):\n ...", "def GetFeatureMeansOutput(self) -> \"itkDataObjectDecoratorVCUCD const *\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterISS3_GetFeatureMeansOutput(self)", "def kernel_data(self):\n\t\treturn self.weights_data()", "def get_all_features(self) :\n raise NotImplementedError", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUS2_GetFeatureMeans(self)", "def get_features(self):\n x,y = self.agent\n return np.array([x,y])", "def actual_kpoints(self):\n kpoints = get_data_node('array.kpoints')\n kpoints.set_kpoints(\n self.vasprun_obj.actual_kpoints,\n weights=self.vasprun_obj.actual_kpoints_weights)\n return kpoints", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUC3_GetFeatureMeans(self)", "def cluster_all_features(feature_mat):\n n_dims = feature_mat.shape[1]\n whitened = whiten(feature_mat.transpose())\n all_codebooks = dict()\n for k in range(n_dims, 0, -1):\n centroids, distortion = kmeans(whitened, k)\n all_codebooks[k] = (distortion, centroids)\n\n return all_codebooks", "def GetFeatureMeansOutput(self) -> \"itkDataObjectDecoratorVCUCD const *\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIF3_GetFeatureMeansOutput(self)", "def GetFeatureMeans(self) -> \"itkVectorContainerUCD_Pointer const &\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUC2_GetFeatureMeans(self)", "def kmeans_centroids(fit_centroids=False):\n\n trainX = np.memmap('data/train_cropped_150.memmap', mode='r', shape=(N_TRAIN, 150, 150, 3))\n # Not used yet\n testX = np.memmap('data/test_cropped_150.memmap', mode='r', shape=(N_TEST, 150, 150, 3))\n\n if fit_centroids:\n km = models.KMeansFeatures.KMeansFeatures(rf_size=6, num_centroids=1600, num_patches=400000)\n km.fit(trainX)\n\n t0 = time.time()\n pickle.dump(km, open('data/kmeans_centroids.pkl', mode='wb'))\n print 'Pickling the 
KMeansFeatures object took {0} seconds'.format(time.time() - t0)\n else:\n km = pickle.load(open('data/kmeans_centroids.pkl'))\n\n models.KMeansFeatures.show_centroids(km.centroids, 6, (6, 6, 3))", "def km_feature_select(self,X_columns, y_column, k, n_features = 2):\n\n try:\n X = self.df_input[X_columns]\n y = self.df_input[[y_column]]\n features = pd.concat([X,y],axis=1)\n features_selected = [y_column]\n while(n_features>1):\n temp_selected = \"\"\n temp_coef = 0\n for col in X_columns:\n temp_feat_sel = np.append(features_selected,col)\n kmeans_model = KMeans(n_clusters=k,random_state=0).fit(features[temp_feat_sel])\n labels = kmeans_model.labels_\n sil_coef = silhouette_score(features[temp_feat_sel], labels, metric='euclidean')\n if((col not in features_selected) and (sil_coef>temp_coef)):\n temp_coef = sil_coef\n temp_selected = col\n features_selected = np.append(features_selected,temp_selected)\n n_features -= 1\n return features_selected\n\n except Exception as e:\n print(e)", "def specialFeatures(self):\r\n return self._specialFeatures", "def kmeans(X, n_clust):\n\n X = scale(X)\n estimator = KMeans(init = 'k-means++', n_clusters = n_clust, n_init = 10, verbose = 2)\n \n estimator.fit(X)\n labels = estimator.predict(X)\n return labels", "def tree_features(self):\n return self._tree_features", "def features(self) -> List[np.ndarray]:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n return [d.features for d in self.data]", "def clusters(self):\n return self._clusters", "def GetFeatureMeansOutput(self) -> \"itkDataObjectDecoratorVCUCD const *\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterISS2_GetFeatureMeansOutput(self)", "def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)", "def get_state(self):\n return self.kf.x[:self.dim_z].squeeze()", "def start_algorithm(self):\r\n vectors = self.vectorize_data()\r\n kmeans = KMeans(init='k-means++', n_clusters=self.cluster_amount, n_init=10)\r\n kmeans.fit(vectors)\r\n return self.cluster_tweet(kmeans.labels_)", "def centroids(self):\n return self.mdm_.covmeans_", "def readFeatures(self):\n\t\treturn self._fileSystem.readFeatures()", "def k_means_step(X, k, means):\n dists = np.array([np.sum((X - mean) * (X - mean), axis=1) for mean in means]) # k*m\n clusters = np.argmin(dists, axis=0)\n new_means = np.array([np.mean(X[clusters == i, :], axis=0) for i in range(k)])\n return new_means, clusters", "def get_original_means(self):\n return self.meanDataValues", "def GetFeatureMeansOutput(self) -> \"itkDataObjectDecoratorVCUCD const *\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIF2_GetFeatureMeansOutput(self)", "def __get_feature_mat(self, (cluster, articleID)):\n feat = self.feature_cache.get((cluster, articleID))\n\n if feat is None:\n feat = np.outer(self.user_feat[cluster],\n self.article_feat[articleID])\n self.feature_cache[(cluster, articleID)] = feat\n\n return feat", "def SIFT_features(self):\n start_time = datetime.datetime.now() \n self.func_log(\"\\n\\tIn SIFT_features()\")\n \n key_points = {}\n descriptor_list = []\n SIFT = cv2.xfeatures2d.SIFT_create()\n \n self.func_log(\"\\t\\tSIFT feature extraction start\")\n \n for key, value in self.images.items():\n features = [] \n for img in value:\n kp, des = SIFT.detectAndCompute(img,None) \n descriptor_list.extend(des)\n features.append(des)\n \n key_points[key] = features \n \n self.func_log(\"\\t\\t\\tKEY: {} 
finished\".format(key))\n \n self.descriptor_list = descriptor_list\n self.key_points = key_points \n \n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))", "def getFeatureClassNames(self):\n return self.featureClasses.keys()", "def GetFeatureMeansOutput(self) -> \"itkDataObjectDecoratorVCUCD const *\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUS3_GetFeatureMeansOutput(self)", "def get_feature_names(self):\n return [self.char]", "def kmeans_001(fit_centroids=False):\n trainX = np.memmap('data/train_cropped_150.memmap', mode='r', shape=(N_TRAIN, 150, 150, 3))\n # Not used yet\n testX = np.memmap('data/test_cropped_150.memmap', mode='r', shape=(N_TEST, 150, 150, 3))\n\n if fit_centroids:\n km = models.KMeansFeatures.KMeansFeatures(rf_size=6, num_centroids=1600, num_patches=400000)\n km.fit(trainX)\n\n km.save_to_file('mdl_kmeans_ridge_rf_001')\n # t0 = time.time()\n # pickle.dump(km, open('data/kmeans_centroids.pkl', mode='wb'))\n # print 'Pickling the KMeansFeatures object took {0} seconds'.format(time.time() - t0)\n else:\n km = models.KMeansFeatures.KMeansFeatures.load_from_file('mdl_kmeans_ridge_rf_001')\n # km = pickle.load(open('data/kmeans_centroids.pkl'))\n\n n = 10000\n\n train_x = km.transform(trainX[0:n, :])\n train_y = classes.train_solutions.data[0:n, :]\n # train_x = km.transform(trainX)\n # train_y = classes.train_solutions.data\n\n logger.info(\"Train x shape: {}\".format(train_x.shape))\n logger.info(\"Train y shape: {}\".format(train_y.shape))\n\n kf = KFold(n, n_folds=2, shuffle=True)\n\n for train, test in kf:\n # clf = models.Ridge.RidgeRFEstimator()\n # clf.rf_rgn = RandomForestRegressor(n_estimators=250, n_jobs=4, verbose=3)\n clf = RandomForestRegressor(n_estimators=20, n_jobs=4, verbose=3, random_state=0, oob_score=True)\n clf.fit(train_x[train], train_y[train])\n res = clf.predict(train_x[test])\n classes.rmse(train_y[test], res)", "def atoms_features(self) -> numpy.array:\n return numpy.asarray(self._atoms_features, dtype=numpy.float32)", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def get_article_features(self, article_id):\n return self.article_features[article_id]", "def featureNames(self):\n return [feature.name for feature in self.features]", "def kmean(X,initial_centroids,max_iters):\n m = np.size(X,0)\n K = np.size(initial_centroids,0)\n centroids = initial_centroids\n idx = np.zeros((m,1))\n for i in range(1,max_iters):\n idx = nearest_cluster(X,centroids)\n centroids = update_centroids(X,idx,K)\n return centroids,idx", "def _get_features(self, session):\n feature_utils.qsr_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n feature_utils.standardize_simple(session, self.config)\n\n # feature_utils.marker_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n\n return session[SESSION_FEAT]", "def 
kmeans_cluster(X_train_input, n_clusters=100):\r\n from sklearn.cluster import MiniBatchKMeans\r\n image_descriptors = []\r\n [image_descriptors.extend(ORB_feature_extractor(img)) for img in X_train_input]\r\n image_descriptors = np.array(image_descriptors) \r\n \r\n kmeans_model = MiniBatchKMeans(n_clusters=n_clusters, init_size=5*n_clusters,\r\n random_state=34, batch_size=128).fit(image_descriptors)\r\n \r\n return kmeans_model", "def extract_features(self, data):\n\n # TODO: Should feature extraction be done on the testing data? In the lecture notes\n # TODO: it is not done with the training data, but with the test data.\n # TODO: Maybe we should use the validate data when we do cross-validation.\n\n features = np.zeros([len(data)*self.K]).reshape(len(data), self.K)\n for i in range(len(data)):\n for j in range(self.K):\n features[i][j] = np.linalg.norm(data[i] - self.cb_vectors[j])\n\n return features" ]
[ "0.6775403", "0.6481033", "0.63745344", "0.63635355", "0.62884897", "0.62546325", "0.612313", "0.6063174", "0.5997075", "0.5957198", "0.5954714", "0.5915384", "0.58808947", "0.58574826", "0.58393997", "0.58329165", "0.5800852", "0.57675624", "0.57581013", "0.57575", "0.5742533", "0.5737699", "0.5731866", "0.57173544", "0.5700815", "0.56965566", "0.5691166", "0.5670271", "0.56661594", "0.5664207", "0.56467646", "0.56310415", "0.562339", "0.5622132", "0.56217366", "0.5614672", "0.56142217", "0.56141204", "0.5612954", "0.56007683", "0.5593647", "0.5589304", "0.5580577", "0.5570414", "0.5544393", "0.5541123", "0.55314016", "0.5528067", "0.5528067", "0.5517564", "0.5510684", "0.5481286", "0.5476134", "0.54723275", "0.54701513", "0.54690635", "0.5463391", "0.54631627", "0.54550904", "0.5449688", "0.5423337", "0.54191077", "0.5416891", "0.541491", "0.5412495", "0.5407931", "0.5404231", "0.5402306", "0.5394454", "0.53882766", "0.5380673", "0.53770447", "0.53746223", "0.5364406", "0.5354547", "0.5345116", "0.5336058", "0.53353465", "0.53297025", "0.53295106", "0.532583", "0.5316904", "0.5315922", "0.5315428", "0.53145397", "0.5314315", "0.5312342", "0.5300107", "0.5299984", "0.5298181", "0.52788705", "0.52670497", "0.5257334", "0.52555674", "0.52554107", "0.52438605", "0.52412957", "0.5240396", "0.5238571", "0.52371216" ]
0.7356534
0
Gets the current mix_encoding
def _get_mix_encoding(self): self._validate_mix_encoding() if self.mix_encoding == "auto": if self._get_mode() == "Explain": return False if self._get_mode() == "Perform": return False if self._get_mode() == "Compete": return True if self._get_mode() == "Optuna": return False else: return deepcopy(self.mix_encoding)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encoding(self):\n return self.original.encoding", "def encoding(self):\n\n return self._encoding", "def encoding(self):\n return self.get_encoding()", "def encoding(self):\n return self._encoding", "def encoding(self):\n return self._encoding", "def encoding(self):\n return self._encoding", "def encoding(self):\n return self._encoding", "def encoding(self):\n return self._enc", "def getEncoding(self):\n return _libsbml.XMLInputStream_getEncoding(self)", "def GetEncoding(self): \n return self.file.GetEncoding()", "def encoding(self) -> str:\n return self._encoding", "def getUnicodeEncoding(self):\n return self.__unicodeEncoding", "def get_data_encoding(self):\n return self._data_encoding", "def get_encoding(self, asset=None):\n if asset is None or 'pc:encoding' not in asset.properties:\n return self.item.properties.get('pc:encoding')\n else:\n return asset.properties.get('pc:encoding')", "def audio_codec(self):\n # type: () -> string_types\n return self._audio_codec", "def detect_encoding(self):\n if hasattr(self, '_detect_encoding'):\n return self._detect_encoding\n\n if self.data:\n self._detect_encoding = charlockholmes.detect(self.data)\n return self._detect_encoding", "def get_encoding(self): # real signature unknown; restored from __doc__\n return \"\"", "def encoding(self):\n if self._encoding:\n return self._encoding\n\n # Scan meta tags for charset.\n if self._html:\n self._encoding = html_to_unicode(self.default_encoding, self._html)[0]\n # Fall back to requests' detected encoding if decode fails.\n try:\n self.raw_html.decode(self.encoding, errors='replace')\n except UnicodeDecodeError:\n self._encoding = self.default_encoding\n\n return self._encoding if self._encoding else self.default_encoding", "def encoding(self) -> 'layout.Encoding':", "def getEncoding(self):\n noteSimple = self.simplify()\n return self.octaveIndices.get(noteSimple.getNoteName().lower(), None)", "def record_encoding(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"record_encoding\")", "def record_encoding(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"record_encoding\")", "def encoding_cctf(self):\n\n return self._encoding_cctf", "def codec(self):\n codec_name = None\n if self.__dict__['codec_name']:\n codec_name = self.__dict__['codec_name']\n return codec_name", "def get_encoding():\n return {\n \"resolution\": RESOLUTION,\n \"max_beat\": MAX_BEAT,\n \"max_duration\": MAX_DURATION,\n \"dimensions\": DIMENSIONS,\n \"n_tokens\": N_TOKENS,\n \"type_code_map\": TYPE_CODE_MAP,\n \"beat_code_map\": BEAT_CODE_MAP,\n \"position_code_map\": POSITION_CODE_MAP,\n \"pitch_code_map\": PITCH_CODE_MAP,\n \"duration_code_map\": DURATION_CODE_MAP,\n \"instrument_code_map\": INSTRUMENT_CODE_MAP,\n \"code_type_map\": CODE_TYPE_MAP,\n \"code_beat_map\": CODE_BEAT_MAP,\n \"code_position_map\": CODE_POSITION_MAP,\n \"code_pitch_map\": CODE_PITCH_MAP,\n \"code_duration_map\": CODE_DURATION_MAP,\n \"code_instrument_map\": CODE_INSTRUMENT_MAP,\n \"program_instrument_map\": PROGRAM_INSTRUMENT_MAP,\n \"instrument_program_map\": INSTRUMENT_PROGRAM_MAP,\n }", "def charset(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"charset\")", "def encoding(self, outgoing=False, incoming=False):\n # pylint: disable=unused-argument\n return self.default_encoding or \"US-ASCII\"", "def encoding(self, outgoing=False, incoming=False):\n assert outgoing or incoming\n return (self.env.get('CHARSET', self._default_encoding)\n if (outgoing and not incoming and self.outbinary) or (\n not outgoing and 
incoming and self.inbinary) or (\n outgoing and incoming and self.outbinary and self.inbinary\n ) else 'ascii')", "def charset(self) -> str:\n return pulumi.get(self, \"charset\")", "def get_encoding(enc, default='latin1'):\n\n if not enc:\n return default\n try:\n codecs.lookup(enc)\n return enc\n except LookupError:\n return default", "def encoding_type(self) -> Optional[pulumi.Input[Union[str, 'LiveEventEncodingType']]]:\n return pulumi.get(self, \"encoding_type\")", "def Encoding(self):\n ret = libxml2mod.xmlTextReaderConstEncoding(self._o)\n return ret", "def charset(self):\n return self._charset", "def GetEncoding(*args, **kwargs):\n return _gdi_.FontMapper_GetEncoding(*args, **kwargs)", "def getEncodingUsedByServer(self):\n if not self.serverEncoding:\n try:\n pywikibot.output(\n u'Contacting server %s to find out its default encoding...'\n % self.host)\n conn = self.getConnection()\n conn.request('HEAD', '/', None, self.header)\n self.response = conn.getresponse()\n self.readEncodingFromResponse(self.response)\n except:\n pass\n if not self.serverEncoding:\n # TODO: We might also load a page, then check for an encoding\n # definition in a HTML meta tag.\n pywikibot.output('Error retrieving server\\'s default charset. '\n 'Using ISO 8859-1.')\n # most browsers use ISO 8859-1 (Latin-1) as the default.\n self.serverEncoding = 'iso8859-1'\n return self.serverEncoding", "def flow_encoding_version(self):\n return self._flow_encoding_version", "def mix(self):\n\t\treturn self.bottle.mix", "def get_encoding(obj):\n for line in inspect.findsource(obj)[0][:2]:\n m = get_encoding_re.search(line)\n if m:\n return m.group(1)\n return 'ascii'", "def htmlGetMetaEncoding(self):\n ret = libxml2mod.htmlGetMetaEncoding(self._o)\n return ret", "def getdefaultencoding(): # real signature unknown; restored from __doc__\n return \"\"", "def getsystemenc():\n\n return sys.getfilesystemencoding()", "def GetEncoding(*args, **kwargs):\n return _gdi_.Font_GetEncoding(*args, **kwargs)", "def codec(self):\n raise NotImplementedError", "def bom_encoding(self) -> Optional[str]:\n return read_bom(self)[0]", "def get_encoded(self):\n pass", "def codec_tag(self):\n codec_t = None\n if self.__dict__['codec_tag_string']:\n codec_t = self.__dict__['codec_tag_string']\n return codec_t", "def rotor_setting(self):\n return self._charset[self._rot_offset]", "def declared_encoding(self) -> Optional[str]:\n return html_body_declared_encoding(self)", "def get_site_encoding():\n global SITE_ENCODING\n if SITE_ENCODING is None:\n encoding = desktop.conf.DEFAULT_SITE_ENCODING.get()\n if not validate_encoding(encoding):\n default = desktop.conf.DEFAULT_SITE_ENCODING.config.default_value\n msg = 'Invalid HUE configuration value for %s: \"%s\". 
Using default \"%s\"' % \\\n (desktop.conf.DEFAULT_SITE_ENCODING.config.key, encoding, default)\n logging.error(msg)\n encoding = default\n SITE_ENCODING = encoding\n return SITE_ENCODING", "def video_codec(self):\n # type: () -> string_types\n return self._video_codec", "def whichEncoding(self):\n if self.request.mode in BROWSER_REQUEST_MODES:\n if self.fields.getOpenIDNamespace() == OPENID2_NS and \\\n len(self.encodeToURL()) > OPENID1_URL_LIMIT:\n return ENCODE_HTML_FORM\n else:\n return ENCODE_URL\n else:\n return ENCODE_KVFORM", "def charset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"charset\")", "def charset(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"charset\")", "def client_encoding(self) -> str:\n pgenc = self.pgconn.parameter_status(b\"client_encoding\") or b\"UTF8\"\n return encodings.pg2py(pgenc)", "def __define_encoding(self, file_name):\n with open(file_name, 'rb') as f:\n raw_data = b''.join([f.readline() for _ in range(self.__num_lines)])\n return self.__chardet.detect(raw_data)['encoding']", "def encoding(self) -> Optional[str]:\n return (\n self._encoding\n or self._body_bom_encoding()\n or self._headers_declared_encoding()\n or self._body_declared_encoding()\n or self._body_inferred_encoding()\n )", "def codec(cls) -> str:\n return 'UTF8'", "def get_valueTransferEncoding(self):\n return \"utf-8\"", "def original_decoder(self):\n return self._original_decoder", "def determineEncoding(self, filepath):\n with open(self.filepath,\"r\",encoding='utf-16') as reader: \n try:\n line = reader.readline()\n return \"utf-16\"\n except:\n return \"utf-8\"", "def declared_encoding(self) -> Optional[str]:\n content_type = self.get(\"Content-Type\", \"\")\n return http_content_type_encoding(content_type)", "def get_encoding(fname):\n file = open(fname, 'rb')\n encoding = chardet.detect(file.read())['encoding']\n return encoding", "def getOutputCharset( self, lang=None ):\n langinfo = getLanguageInfo( lang or self )\n return Charset( langinfo['python_charset'], langinfo['mail_charset'] )", "def coder(self):\r\n return self.coder", "def getregentry():\n return codecs.CodecInfo(\n name=ENCODING_NAME,\n encode=Codec().encode,\n decode=Codec().decode,\n incrementalencoder=IncrementalEncoder,\n incrementaldecoder=IncrementalDecoder,\n streamreader=StreamReader,\n streamwriter=StreamWriter,\n )", "def getCipherImplementation(self):\r\n if not self._writeState.encContext:\r\n return None\r\n return self._writeState.encContext.implementation", "def get_encoding_errors(self):\n return self._encoding_errors", "def get_encoder(encoding):\n if encoding == Encoding.V1_THRIFT:\n return _V1ThriftEncoder()\n if encoding == Encoding.V1_JSON:\n return _V1JSONEncoder()\n if encoding == Encoding.V2_JSON:\n return _V2JSONEncoder()\n if encoding == Encoding.V2_PROTO3:\n return _V2ProtobufEncoder()\n raise ZipkinError(\"Unknown encoding: {}\".format(encoding))", "def get_encoding_dict(self) -> BoardEncodingDict:\n encoding_dict: BoardEncodingDict = {field: formats['code'] for field, formats in self.board_formatting.items()}\n return encoding_dict", "def encoding(self, code) -> str:\n return 'utf-8'", "def getpreferredencoding() -> str:\n return locale.getpreferredencoding() or \"UTF-8\"", "def get_charset(self):\n _lib.caca_get_dither_charset.argtypes = [_Dither]\n _lib.caca_get_dither_charset.restype = ctypes.c_char_p\n\n return _lib.caca_get_dither_charset(self)", "def encode(self):\n\n return self.get_content()", "def setEncoding(self,value):\n 
self.PDFreactorConfiguration.in1[\"encoding\"] = value", "def encodings(self, code):\n return (self.encoding(code), )", "def get_encoding_type(file): \n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']", "def ring_characters(self):\n return self._charset", "def get_decoder(self):\n raise NotImplementedError()", "def GetDefaultEncoding(*args, **kwargs):\n return _gdi_.Font_GetDefaultEncoding(*args, **kwargs)", "def get_current_mode(self):\n return self.read(0xa2)", "def objc_type_encoding(self):\r\n if not hasattr(self, '_objc_type_encoding'):\r\n self._objc_type_encoding = \\\r\n conf.lib.clang_getDeclObjCTypeEncoding(self)\r\n\r\n return self._objc_type_encoding", "def get_encoded(self):\n return self.key", "def getfilesystemencoding(): # real signature unknown; restored from __doc__\n return \"\"", "def stream_encoding(stream):\n encoding = getattr(stream, 'encoding', None)\n # Windows returns 'cp0' to indicate no encoding\n return encoding if encoding not in (None, 'cp0') else 'utf-8'", "def FontMapper_GetEncoding(*args, **kwargs):\n return _gdi_.FontMapper_GetEncoding(*args, **kwargs)", "def getFontFace(self):\n return self.fontFace", "def encodings():\n from . import factory\n return factory.MAPPINGS.keys()", "def decoded(self):\n return self._decoded", "def get_charset(self, default: str) -> str:\n ...", "def getInputCharset( self, lang=None ):\n langinfo = getLanguageInfo( lang or self )\n return Charset( langinfo['mail_charset'], langinfo['python_charset'] )", "def _get_encoding_form(self, input):\n if self.inference_procedure == 'direct':\n return input\n else:\n raise NotImplementedError", "def GetEncodingName(*args, **kwargs):\n return _gdi_.FontMapper_GetEncodingName(*args, **kwargs)", "def GetEncodingFromName(*args, **kwargs):\n return _gdi_.FontMapper_GetEncodingFromName(*args, **kwargs)", "def audio_language(self):\n # type: () -> string_types\n return self._audio_language", "def encoded_value(self):\n return self._encoded_value", "def get_value(self, encoding: tp.Optional[str] = None) -> tp.Union[str, bytes]:\n with open(self.path, 'rb') as f_in:\n data = f_in.read()\n if encoding:\n return data.decode(encoding)\n else:\n return data", "def current_bytes(self):\n return self._current_bytes", "def get_xml_encoding(source):\n with get_xml_iterator(source) as iterator:\n start, tag, data, pos = iterator.next()\n if not start or tag != u'xml':\n raise IOError('Invalid XML file')\n\n return data['encoding']", "def charset(self) -> Optional[str]:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_dict.get(\"charset\") # type: ignore[union-attr]", "def __originalLanguage(self):\n return self.origLanguageComboBox.itemData(\n self.origLanguageComboBox.currentIndex())" ]
[ "0.7560038", "0.75277394", "0.74015206", "0.73925185", "0.73925185", "0.73925185", "0.73925185", "0.7173765", "0.7009836", "0.69050586", "0.6849858", "0.6755228", "0.6746344", "0.6715124", "0.65442663", "0.65063953", "0.64930034", "0.64095986", "0.63821363", "0.63166624", "0.62670225", "0.62670225", "0.62130684", "0.610967", "0.60888326", "0.60231507", "0.60031325", "0.5975061", "0.59433603", "0.59201086", "0.5917587", "0.59144175", "0.58977884", "0.58712286", "0.5846752", "0.5817324", "0.58143187", "0.58123744", "0.5807727", "0.5767319", "0.5764855", "0.57593155", "0.5757354", "0.5751786", "0.57371914", "0.57276", "0.5726417", "0.5723362", "0.5718631", "0.5710924", "0.56498384", "0.5634937", "0.5634937", "0.563294", "0.5606158", "0.5600843", "0.55810577", "0.55706024", "0.5549141", "0.5540546", "0.553546", "0.5524331", "0.55057365", "0.54886883", "0.5483952", "0.5483585", "0.5447615", "0.5439138", "0.5423718", "0.5391125", "0.5374437", "0.5367722", "0.5274188", "0.52719855", "0.52660257", "0.52487427", "0.5239647", "0.52363616", "0.5234107", "0.5226075", "0.5218455", "0.521348", "0.52059025", "0.52035165", "0.519742", "0.5185389", "0.51623255", "0.51619893", "0.51583266", "0.5154945", "0.51359195", "0.5124728", "0.5122028", "0.51056117", "0.51041996", "0.51004887", "0.50894123", "0.50746506", "0.507276", "0.50719583" ]
0.73403645
7
Gets the current max_single_prediction_time
def _get_max_single_prediction_time(self): self._validate_max_single_prediction_time() if self.max_single_prediction_time is None: if self._get_mode() == "Perform": return 0.5 # prediction time should be under 0.5 second return None else: return deepcopy(self.max_single_prediction_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_simulated_time(self):\n\n query = \"SELECT MAX(time) FROM patient_signal_values\"\n\n return self.mysql_obj.fetch_value(query)", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def max_time(self):\n return self._max_time", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))", "def max_time(self):\n return self._ll_tree_sequence.get_max_time()", "def getMaxSimTime(self):\n return self.max_simsecs_value", "def _get_detection_time_multiplier(self):\n return self.__detection_time_multiplier", "def get_inference_time(self):\n return self._engine.get_inference_time()", "def getDefaultTime(self):\n return max(tvp[0] for tvp in self.timeValuePairs)", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def last_tick_time(self):\n return self.last_tick_", "def _get_max_t(self):\n\n return max([\n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])", "def time_to_target_training(self) -> str:\r\n # TODO: Figure out how to implement this.\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"time_to_target_training\"))\r\n return self._training_modes[0]", "def _get_last_meas_time(self):\n\n #if flag for whole data regeneration is set\n if self._process_type == 'full_gen':\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n \n \n res = self._db.Query(\"\"\"SELECT last_measurement_time\n FROM last_dashboard_element_segment_value\n WHERE\n element_id = %s\n AND segment_value_id = %s\n \"\"\",(self._id, self._segment_value_id))\n if not res:\n return datetime.datetime(1900, 1, 1, 0, 0, 0)\n item = self._db.record[0]\n if item['last_measurement_time']:\n return item['last_measurement_time']\n return datetime.datetime(1900, 1, 1, 0, 0, 0)", "def getLastestTime(self):\n if not self.cache_times:\n return None\n return self.cache_times[-1]", "def get_last(self):\n self.accumulated_time_last = pg.time.get_ticks() - self.start_time_last\n return self.accumulated_time_last", "def max_retire_time(self):\n return self._max_retire_time", "def _get_max_t(self):\n \"\"\"\n if hasattr(self,'k_of_t'):\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n self.k_of_t[-1][0],\n ])\n else:\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])\n \"\"\"\n return self.t_max", "def get_max_end_time(self):\n max_end_time = 1.\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = timestamps_group[label]\n end_time = timestamps_dset[-1]\n if end_time > max_end_time: max_end_time = end_time\n file.close()\n return max_end_time", "def get_time(self) -> float:\n raise NotImplementedError()", "def get_last_time(self):\n \n return self._last", "def max_delay_time(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"max_delay_time\")", "def computation_time(self) -> float:\r\n if self._computation_times is None:\r\n return None\r\n else:\r\n return self._computation_times[0]", "def max_time(self) -> str:\n return self._max_time", "def last_time(self) -> datetime:\n return self.activities[-1].timestamp", "def _get_max_suppress_time(self):\n return self.__max_suppress_time", "def get_time(self):\n return self.get_timed() / 
10.0", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def get_time(self):\n clock = self.pipeline.get_clock()\n tm = clock.get_internal_time()\n return tm / 1.e9", "def posterior_forecast(self):\n return self.posterior_prediction", "def get_min_tim(self):\n return self.get_shortest_mode().tim", "def get_last_timestamp(self):\n return self._frame_timestamp", "def max_delay_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_delay_time\")", "def max_delay_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_delay_time\")", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def lasttime(self):\n if hasattr(self, \"_lasttime\"):\n return self._lasttime\n else:\n return None", "def endTime(self) -> float:\n try: return self.times[-1]\n except IndexError: return 0.0", "def last_optime(self):\n return self._last_optime", "def get_predicted_time(self):\n estPatches = 0\n for row in self.regionDict:\n for minX, maxX in self.regionDict[row]:\n estPatches += (maxX-minX)/(STEP_SIZE/self.ratio[0])\n\n estTime = (estPatches/300.)*15 # Assume 10 sec per 300 patches (3 sec to generate, 7 sec to predict)\n return estTime/60. # Return minutes", "def getTime(self) -> float:\n return self.t", "def time_to_process_last_submission(self) -> int:\n return self.snapshot['time_to_process_last_submission']", "def get_last_spot_deposit_time(self) -> int:\n table = tables.SPOT_DEPOSIT_TABLE\n selection = f\"MAX({table.insertTime})\"\n result = self.get_conditions_rows(table,\n selection=selection)\n\n default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))\n try:\n result = result[0][0]\n except IndexError:\n return default\n if result is None:\n return default\n return result", "def t(self):\n return self._data_writer.get_current_run_time_ms()", "def get_last_spot_dividend_time(self) -> int:\n table = tables.SPOT_DIVIDEND_TABLE\n selection = f\"MAX({table.divTime})\"\n result = self.get_conditions_rows(table,\n selection=selection)\n\n default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))\n try:\n result = result[0][0]\n except IndexError:\n return default\n if result is None:\n return default\n return result", "def __last_time(self):\n if self.__stopped is not None:\n return self.__stopped\n return self.__time()", "def get_last_timestep(self):\n d = DataFrame(list(self.svalin_db.find({}, {'time'})))\n d.index = d.time\n last_time = convert_datetime(d.time.values[-1])\n return last_time", "def output_time(self):\n return self._output_time", "def _get_time(self, state: State) -> int:\n benchmark_time = {\n 'resnet': state.timestamp.epoch.value,\n 'bert': state.timestamp.sample.value,\n }\n return benchmark_time[self.benchmark]", "def _get_total_time_limit(self):\n self._validate_total_time_limit()\n if self._get_mode() == \"Optuna\":\n return None # there no training limit for model in the Optuna mode\n # just train and be happy with super models :)\n return deepcopy(self.total_time_limit)", "def get_timeval():\n return convert_timeval(time.time())", "def time(self) -> float:\n return self.sim_scene.data.time", "def _get_current_epoch_time() -> float:\n return time.time()", "def time(self) -> float:\n return self._time", "def _get_target(self, prediction: Tensor) -> Tensor:\n if self.model_config.mode == ModelMode.binary_classification:\n # TODO: Allow customization of the thresholds used below.\n if 
self.model_config.return_type.value == 'raw':\n return (prediction > 0).long().view(-1)\n if self.model_config.return_type.value == 'probs':\n return (prediction > 0.5).long().view(-1)\n assert False\n\n if self.model_config.mode == ModelMode.multiclass_classification:\n return prediction.argmax(dim=-1)\n\n return prediction", "def get_last_spot_withdraw_time(self) -> int:\n table = tables.SPOT_WITHDRAW_TABLE\n selection = f\"MAX({table.applyTime})\"\n result = self.get_conditions_rows(table,\n selection=selection)\n default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))\n try:\n result = result[0][0]\n except IndexError:\n return default\n if result is None:\n return default\n return result", "def last_update_time_in_minutes(self):\n return self._last_update_time_in_minutes", "def get_time(self):\n return self._current_time_sec", "def get_last_saved_estimation(self):\n return None", "def get_last_update_time(self):\n return self.last_update_time", "def gettime(self):\n return self.t", "def get_tmax(data):\n return data[np.argmax(data[:, 1])][0]", "def native_max_value(self) -> float:\n return TEMP_MAXIMUM", "def maxtime(conn):\n c = conn.cursor()\n r = c.execute(\"SELECT max(time) as max_time FROM event WHERE bin_id not null\").fetchall()\n last_time = r[0]['max_time']\n return last_time", "def get_maxdelay(self) -> float:\n return self.maxdelay", "def get_best_times(self):\n\n return self.best_times", "def time_threshold(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"time_threshold\")", "def time_threshold(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"time_threshold\")", "def initialTime(self):\n return self.params['t0']", "def last_value(self):\n return self.samples[-1]", "def getLatestMeasurement(self): \n return self.measurement[len(self.measurement)-1]", "def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max", "def get_time_step(self):\n return self._time_step", "def _get_half_time(self):\n return self.__half_time", "def __current_milli_time(self):\n\n return int(round(time.time() * 1000))", "def __get_timeval():\n return convert_timeval(time.time())", "def time_threshold(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"time_threshold\")", "def last_update(self) -> Optional['outputs.CSIPowerMaxStatusLastUpdate']:\n return pulumi.get(self, \"last_update\")", "def latestValue(self):\n if len(self.values) > 0:\n return self.values[-1]\n else:\n return 0", "def get_time(self):\n return self.time", "def last_timestamp(self):\n return self._last_timestamp", "def time_return(self):\n return self.time", "def get_time(self):\n return self.time_param", "def last_peak_delta(self):\n return self._p1_time - self._p2_time", "def current_time(cls) -> float:", "def _first_presence_time(self) -> float:\n return self.population.presence_interval().boundaries()[0][0]", "def get_last_match_time():\n\n query = '''SELECT max(reported_at) FROM matches'''\n\n record = db.read(query)\n last_match_time = record[0][0]\n\n return last_match_time", "def get_time(self) -> float:\n return self.player.time", "def get_last_sample(self) -> InternalSample:", "def last_transition_time(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_transition_time\")", "def speed_max(self):\n return self._speed_max", "def get_prediction(self):\n return self.prediction", "def get_max_time_vals(train_results):\n t_tr, t_te, t_lo, t_re = 0, 0, 0, 0\n for tres in train_results:\n t_tr += tres.time_train\n t_te += tres.time_test\n t_lo += tres.time_load\n t_re += tres.time_reduce\n n = len(train_results)\n return t_tr/n, t_te/n, t_lo/n, t_re/n", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._total_time", "def GetTimePrecision():\n return _gmat_py.GmatBase_GetTimePrecision()", "def estimated_latency_to_mote(self):\n return self._estimated_latency_to_mote", "def get_frame_time(self):\n return self.get_timings().frame_time", "def get_last_lr(self) -> Tensor:\n\n return self.lr_scheduler.get_last_lr()" ]
[ "0.713428", "0.7000223", "0.6750113", "0.67000544", "0.66799814", "0.664459", "0.6637974", "0.65101796", "0.64204127", "0.63254064", "0.63026834", "0.62825453", "0.62509656", "0.6248696", "0.6228762", "0.6212105", "0.61944056", "0.6155345", "0.614388", "0.61273146", "0.6119531", "0.6035327", "0.60263294", "0.6002067", "0.60004765", "0.5997111", "0.59815276", "0.5949207", "0.5942173", "0.5938999", "0.5930184", "0.59231216", "0.5922193", "0.5911817", "0.5911817", "0.5910964", "0.5906301", "0.58895856", "0.58858895", "0.5879635", "0.58606684", "0.5854608", "0.5851881", "0.5845328", "0.58321613", "0.58276045", "0.58267546", "0.57964", "0.5764742", "0.5755398", "0.5754807", "0.5730976", "0.5730436", "0.5715227", "0.5714168", "0.5714111", "0.57097137", "0.5699963", "0.56969154", "0.5695172", "0.56939244", "0.56920916", "0.5689216", "0.56784153", "0.5675978", "0.5675199", "0.56744975", "0.56744975", "0.56719005", "0.56678975", "0.5666326", "0.5664648", "0.56598085", "0.5656725", "0.56520027", "0.56407267", "0.5633632", "0.56314725", "0.5630702", "0.56248266", "0.56241035", "0.56216383", "0.5614478", "0.56056917", "0.56020033", "0.56006986", "0.55965227", "0.5595715", "0.5589188", "0.55875045", "0.5585645", "0.5585637", "0.5584648", "0.5579117", "0.5579117", "0.55786824", "0.55701905", "0.55691886", "0.55690116", "0.5567941" ]
0.903874
0
Gets the current optuna_time_budget
def _get_optuna_time_budget(self): self._validate_optuna_time_budget() if self.optuna_time_budget is None: if self._get_mode() == "Optuna": return 3600 return None else: if self._get_mode() != "Optuna": # use only for mode Optuna return None return deepcopy(self.optuna_time_budget)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def budget(self):\n return self._budget", "def last_optime(self):\n return self._last_optime", "def adoption_time(self):\n return self._adoption_time", "def get_time_step_to_enqueue(self):\n return self.time_step_to_enqueue", "def _get_total_time_limit(self):\n self._validate_total_time_limit()\n if self._get_mode() == \"Optuna\":\n return None # there no training limit for model in the Optuna mode\n # just train and be happy with super models :)\n return deepcopy(self.total_time_limit)", "def get_time(self):\n return self.run_command('get_time')[0]", "def get_current_time_lag_min(self):\n self.current_time_lag_min = self.get_timelag()[0] // 60", "def gettime(self):\n return self.t", "def getSelectedShowtime(self):\n\n cur = self.current()\n if cur < 0:\n return None\n else:\n return self.theater.showtimes(self.showtimeIds[cur])", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def get_t(self):\n return self.t", "def pending_time(self):\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n timediff = now - self.time_requested\n return timediff", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time", "def free_flight_time(self):\n return self._free_flight_time", "def dep_time(self):\n return self._dep_time", "def get_current_timeout(cls):\n return cls.current().get_timeout()", "def get(self):\n now = datetime.datetime.utcnow()\n if now > self.time_of_next_update:\n self._update_value()\n return self.value", "def get_budget(self, category: BudgetCategory) -> Budget:\n return self.budgets.get(category, None)", "def brasilia_time():\n brasilia_time = pd.Timestamp.now('UTC') - pd.Timedelta(hours=3)\n return brasilia_time", "def getTime(self):\n return self.time", "def current_period(self):\n return self._current_period", "def get_time(self):\n return self._total_time", "def get_last(self):\n self.accumulated_time_last = pg.time.get_ticks() - self.start_time_last\n return self.accumulated_time_last", "def time(self, k):\n \n it = Historial.__getitem__(self, k)\n if it != None:\n return it[0]\n else:\n return None", "def getTime(self) -> float:\n return self.t", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? 
AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def get_time(self):\n return self.time_param", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def solution_meeting_time(self):\n for s in self.vars_meeting_time:\n if self.vars_meeting_time[s].solution_value():\n return s\n return None", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def t(self):\n return self._t", "def t(self):\n return self._t", "def get_time(self):\n return self.widget().time()", "def get_delta_t(self):\n return self.gv.t_queue[self.gv.filter_max_bound - 1] \\\n - self.gv.t_queue[self.gv.filter_min_bound]", "def current(self):\n\t\treturn self.reading_set.latest(field_name='time')", "def get_time(self):\n return self.time", "def var(self):\n\n return time_stat(self, stat=\"var\")", "def get_time(self) -> float:\n raise NotImplementedError()", "def docked_time(self):\n return self._docked_time", "def used_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"used_time\")", "def used_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"used_time\")", "def get_timeperiod(self, object_name, user_key = None):\n\t\treturn self.get_object('timeperiod',object_name, 
user_key = user_key)", "def get_mfa(self):\n return self.totp.now()", "def get_current_simulated_time(self):\n\n query = \"SELECT MAX(time) FROM patient_signal_values\"\n\n return self.mysql_obj.fetch_value(query)", "def get_timeval():\n return convert_timeval(time.time())", "def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step", "def get_time(self):\n return self._current_time_sec", "def get(cls, approach):\n return approach.time.date()", "def getTau(self) -> float:\n return self.tau", "def _get_model_time_limit(self):\n self._validate_model_time_limit()\n return deepcopy(self.model_time_limit)", "def extract_goal_state(self):\n time = rospy.get_time()\n ref_time = time - self.last_time\n future_time = ref_time + self.update_rate\n\n # get state of future time in global trajectory\n return df.compute_output3D(self.global_solution, self.order, self.time[self.future_index], future_time)", "def PreferredLifeTime(self):\n if self.force_auto_sync:\n self.get('PreferredLifeTime')\n return self._PreferredLifeTime", "def draft_timings(self):\n return self._get(\"draft_timings\")", "def pending_time_descriptive(self):\n return get_time_descriptive(self.pending_time.seconds)", "def __get_timeval():\n return convert_timeval(time.time())", "def get_goal(self):\n self._pid_lock.acquire() # Acquire Lock\n rtn = self._goal\n self._pid_lock.release() # Release Lock\n\n return rtn", "def current_effective_deadline(cls) -> float:", "def current_time(cls) -> float:", "def time_limit(self):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n return self.time_limit_as_of_update(\n all_time_limit_updates.latest('id'))", "def getLastestTime(self):\n if not self.cache_times:\n return None\n return self.cache_times[-1]", "def get_time_step(self):\n return self._time_step", "def pc_work_time_var(self):\n return _spacegrant_swig.general_burster_2_sptr_pc_work_time_var(self)", "def get_time():\n return {\n 'timestamp': datetime.now()+ timedelta(hours=-1)\n }", "def get_time(self):\n return datetime.datetime.now(self.time_zone)", "def get_imeastime(self):\n return self.itime", "def time_to_target(self) -> str:\r\n # TODO: Figure out how to implement this.\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"time_to_target\"))\r\n return self._time_to_target_options[0]", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def getTimeDelay(*args):\n return args[0].TimeState.TimeDelay.time_delay", "def hold_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"hold_time\")", "def get_time():\n return datetime.datetime.now()", "def get_min_tim(self):\n return self.get_shortest_mode().tim", "def max_time(self):\n return self._max_time", "def get_current_time(self):\n return self.time", "def rt_dep_time(self):\n return self._rt_dep_time", "def budget_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"budget_name\")", "def timing_reference(self):\n return self._timing_reference", "def pc_work_time_var(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_work_time_var(self)", "def get_value(self, t, timestep_results):\n\n # Splice list up until `t + 1` to include the value at time step t\n last_timestep = t + 1\n if self.prev_timestep:\n last_timestep = last_timestep - 1\n\n time_scope = 
timestep_results[self.component_name][:last_timestep]\n\n # For nice function definitions, either return the last element, or return the full history.\n if self.with_history:\n return time_scope\n else:\n return time_scope[-1]", "def initialTime(self):\n return self.params['t0']", "def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))", "def time_to_decision(self):\n if self.offending_date is None or self.date_of_decision is None:\n return None\n else:\n return self.date_of_decision - self.offending_date", "def time(self):\n return self.time_array", "def curr_val(self, activation_time = -1):\n \n self.activation_time = activation_time\n if(self.activation_time < 0):\n return 0\n if self.model_type == 'neg_exp':\n # This model is p_0*e^(-beta*t)\n beta = math.log(1-self.precision)/self.t_max\n return self.potential*math.exp(beta*self.activation_time)\n elif self.model_type == 'alpha':\n # This model is p_0*a*t*e^(1-at) which better models the action potential\n # a is chosen such that the peak of the function occurs at self.fire/2\n # since the peak is at t = 1/a, then a = 2/self.fire\n alpha = 2/self.fire\n return self.potential*alpha*self.activation_time*math.exp(1-alpha*self.activation_time)", "def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max", "def used_time(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"used_time\")", "def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim", "def arr_time(self):\n return self._arr_time", "def solver_time(self):\n return self._stub.List(self._message).solver_time", "def changetau(self, tau):\n if tau == self.tau:\n return self\n elif tau < self.tau:\n return AsymptoticTimeInvariant(self.v[self.tau - tau: tau + self.tau - 1])\n else:\n v = np.zeros(2*tau-1)\n v[tau - self.tau: tau + self.tau - 1] = self.v\n return AsymptoticTimeInvariant(v)", "def getdifficulty(self):\n return self.proxy.getdifficulty()", "def get_report_tau_or_i(self):\r\n return self._arm.get_report_tau_or_i()", "def build_seconds_used(self):\n return self.get_data(\"build_seconds_used\")", "def time(self):\n return self[self.time_columns]", "def time(self):\n return self[self.time_columns]", "def opt_value(self):\n return self._opt_value", "def get_last_timestep(self):\n d = DataFrame(list(self.svalin_db.find({}, {'time'})))\n d.index = d.time\n last_time = convert_datetime(d.time.values[-1])\n return last_time", "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s" ]
[ "0.6425248", "0.6043277", "0.5918131", "0.57312304", "0.56376636", "0.55601627", "0.55194855", "0.54705125", "0.5403898", "0.53979033", "0.539061", "0.5377435", "0.53700995", "0.53586614", "0.53013504", "0.52734435", "0.52604705", "0.524631", "0.5189846", "0.5182123", "0.51480925", "0.51345056", "0.5129658", "0.51287425", "0.51148325", "0.51055187", "0.51004833", "0.5093716", "0.5093716", "0.50905025", "0.50898176", "0.50724334", "0.50724334", "0.5060653", "0.50591534", "0.5057145", "0.50536394", "0.50504", "0.5034312", "0.5030372", "0.5027157", "0.5027157", "0.5024183", "0.50229686", "0.50163925", "0.50141007", "0.5010252", "0.500659", "0.49958664", "0.49938807", "0.49914095", "0.4985457", "0.49840158", "0.4983078", "0.49732018", "0.49704662", "0.49673375", "0.49630818", "0.496112", "0.49597675", "0.49590608", "0.49572274", "0.49538723", "0.4947447", "0.49401498", "0.49363315", "0.49347803", "0.4926298", "0.4926298", "0.4926298", "0.4926142", "0.4925308", "0.49252918", "0.49220362", "0.49195835", "0.49171513", "0.49130183", "0.49123818", "0.49121943", "0.49116522", "0.49107087", "0.49101132", "0.49073374", "0.48977712", "0.48961037", "0.48957992", "0.4892756", "0.4888822", "0.48869884", "0.4883072", "0.48815563", "0.4879439", "0.48774078", "0.48773047", "0.4876415", "0.48739606", "0.48739606", "0.48728365", "0.48713893", "0.48699585" ]
0.8460654
0
Gets the current optuna_init_params
def _get_optuna_init_params(self):
        self._validate_optuna_init_params()
        if self._get_mode() != "Optuna":
            # use only for mode Optuna
            return {}
        return deepcopy(self.optuna_init_params)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def getInitParams(self):\n return {}", "def _get_current_training_params(self):\n params = {}\n params[\"lyap_relu_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.lyapunov_relu.state_dict())\n if not self.R_options.fixed_R:\n params[\"R_params\"] = self.R_options._variables.clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n params[\"controller_params\"] = copy.deepcopy(\n self.lyapunov_hybrid_system.system.controller_network.\n state_dict())\n return params", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['p'] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['p'] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['p'] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def initial_parameters(self):\n return self._initial_parameters", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def init_kwargs(self):\n return {\"variant\": self.variant}", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low'] = self.low\n paramDict['alpha'] = self.alpha\n paramDict['beta'] = self.beta\n return paramDict", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['workingDir'] = self.workingDir\n paramDict['dataFilename'] = self.dataFilename\n paramDict['functionID'] = self.functionID\n paramDict['functionType'] = self.functionType\n paramDict['variableID'] = self.variableID\n paramDict['k'] = self.k\n paramDict['s'] = self.s\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['location'] = self.location\n paramDict['scale' ] = self.scale\n return paramDict", "def 
getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['base'] = self.base\n return paramDict", "def getInitParams(self):\n paramDict = {}\n paramDict['upperBoundUsed' ] = self.upperBoundUsed\n paramDict['lowerBoundUsed' ] = self.lowerBoundUsed\n paramDict['hasInfiniteBound'] = self.hasInfiniteBound\n paramDict['upperBound' ] = self.upperBound\n paramDict['lowerBound' ] = self.lowerBound\n paramDict['adjustmentType' ] = self.__adjustmentType\n paramDict['dimensionality' ] = self.dimensionality\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def load_params(self):\n return self.params", "def get_init_params(self):\n return PgInput.get_init_params(self) + [EnstoreTapeDriveInput.VERSION_ATTRIBUTE,\n 'InputMinInterval', 'InputDelay', 'CheckpointRollback']", "def get_tunable_params(self):\n return self._tunable_params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def get_params (self):\n return self.params", "def get_params (self):\n return self.params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def _get_params(self):\r\n return self.k._get_params()", "def get_params(self, deep=True):\n params = super().get_params()\n\n params['a'] = self._a\n params['eta'] = self._eta\n\n return params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict", "def init_params(self):\n blah", "def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}", "def params(self):\n\t\treturn self.params_", "def _get_params_for_run(self):\n 
if self._optimizer is not None:\n return self._optimizer.get_next_params()\n else:\n return self._params", "def get_main_params(self):\n return self.get_section_config('main')", "def get_params(self) -> np.array:\n pass", "def parameters(self):\n return self._default_params", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['functionType'] = self.functionType\n paramDict['dataFilename'] = self.dataFilename\n paramDict['workingDir'] = self.workingDir\n return paramDict", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['method'] = self.method\n paramDict['dimension'] = self.dimension\n paramDict['rank'] = self.rank\n paramDict['mu'] = self.mu\n paramDict['covariance'] = self.covariance\n return paramDict", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def fetch_initial_params(self, run_id, backend=None, results_dir=None, attributes_dump_name=None):\n model_attributes = self._fetch_model_attributes(run_id, backend, results_dir, attributes_dump_name)\n return model_attributes['initial_params']", "def get_params(self):\n\n return self.params_", "def get_params(self):\n return self.arr", "def get_optimization_parameters(self):\n pass", "def get_params(self, deep=False):\n return {\"alpha\": self.alpha, \"beta\": self.beta, \"gamma\": self.gamma, \"W\": self.W, \"bias\": self.bias, \"add_bias\": self.add_bias, \"opts\": self.opts}", "def get_params(self):\n pass", "def api_params(self):\n return self._api_params", "def get_init_params(exploration_id):\n exploration = get_exploration_by_id(exploration_id)\n\n # Note that the list of parameter changes is ordered. Parameter changes\n # later in the list may depend on parameter changes that have been set\n # earlier in the same list.\n new_params = {}\n for pc in exploration.param_changes:\n obj_type = exploration.get_obj_type_for_param(pc.name)\n new_params[pc.name] = pc.get_normalized_value(obj_type, new_params)\n return new_params", "def params(self):\n return {'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'n_kernel': self.n_kernel,\n 'padding': self.padding,\n 'act_fn': self.act_fn,\n 'w_init_fn': self.w_init_fn,\n 'resize': self.resize,\n 'use_bias': self.use_bias,\n 'atrous': self.atrous,\n 'idx': self.idx}", "def params(self):\n return self._pars", "def get_injected_params(self):\n if 'data_params' in self.all_params.keys():\n if self.all_params['data_params'] is not None:\n data_params = {}\n for pkey in self.all_params['data_params'].keys():\n data_params[pkey] = \\\n self.all_params['data_params'][pkey]['value']\n else:\n data_params = None\n else:\n data_params = None\n return data_params", "def get_params(self):\n return {\n 'dropout': self._dropout,\n 'layer_size': self._layer_size,\n 'num_layers': self._num_layers,\n 'embedding_layer_size': self._embedding_layer_size,\n 'controller_type': self._controller_type\n }", "def default_initial_params(self) -> numpy.ndarray:\n\n total_time = self.adiabatic_evolution_time\n step_time = total_time / self.iterations\n hamiltonian = self.hamiltonian\n\n params = []\n for param in self.params():\n if param.letter == 'U':\n p, i = param.subscripts\n params.append(_canonicalize_exponent(\n -0.5 * self.orbital_energies[p] * step_time / numpy.pi, 2))\n else:\n p, q, i = param.subscripts\n # Use the midpoint of the time segment\n interpolation_progress = 0.5 * (2 * i + 1) / self.iterations\n 
params.append(_canonicalize_exponent(\n -2 * hamiltonian.two_body[p, q] * interpolation_progress *\n step_time / numpy.pi, 2))\n\n return numpy.array(params)", "def get_raw_params(self):\n return self.settings[\"params\"]", "def _get_current_hyperparameters(self):", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['transition'] = self.transition\n paramDict['steadyStatePb'] = self.steadyStatePb\n return paramDict", "def training_opts(self):\n return self._training_opts", "def _core_init_params(self) :\n\t\ta_list,b_list = [],[]\n\t\tg_list,h_list = [],[]\n\t\t\n\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\th_eqn = self.initsol['h'][eqn-1]\n\t\t\tg_eqn = self.initsol['g'][eqn-1]\n\n\n\t\t\ta_list.append(self.initsol['alpha'][eqn-1])\n\t\t\tb_list.append(self.initsol['beta'][eqn-1])\n\t\t\t\n\t\t\tg_eqn = np.array([g_eqn[reg-1] for reg in reg_p])\n\t\t\th_eqn = np.array([h_eqn[reg-1] for reg in reg_d])\n\t\t\th_list.append(h_eqn)\n\t\t\tg_list.append(g_eqn)\n\t\n\t\treturn (a_list,b_list,g_list,h_list)", "def params(self):\n return self._params", "def params(self):\n return self._params", "def params(self):\n return self._params", "def InitParams(ss):\n ss.Params.OpenJSON(\"pat_assoc.params\")", "def params(self) -> Munch:\n return self._params", "def parameters(self):\n return self._params", "def get_params(self):", "def parameters(self):\n return self.pars", "def get_params(self):\n return {}", "def getParams(self):\n return self.__params", "def _training_params(self):\n if isinstance(\n self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem) and self.search_controller:\n # For a feedback system, we train both the Lyapunov network\n # parameters and the controller network parameters.\n training_params = list(\n self.lyapunov_hybrid_system.lyapunov_relu.parameters(\n )) + self.lyapunov_hybrid_system.system.controller_variables(\n ) + self.R_options.variables()\n else:\n training_params = \\\n list(self.lyapunov_hybrid_system.lyapunov_relu.parameters()) +\\\n self.R_options.variables()\n return training_params", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def params():\n return utils.Params('../experiments/base-model/params.json')", "def get_params(self, _deep: bool = True) -> dict:\n return self.params", "def getParameters(self):\n\t\td = AdaptiveBatAlgorithm.getParameters(self)\n\t\td.update({\n\t\t\t'A_l': self.A_l,\n\t\t\t'A_u': self.A_u,\n\t\t\t'r_l': self.r_l,\n\t\t\t'r_u': self.r_u,\n\t\t\t'tao_1': self.tao_1,\n\t\t\t'tao_2': self.tao_2\n\t\t})\n\t\treturn d", "def param_init(self, sig=0.01):\n self.rhos = np.ones(self.Ndim)\n self.a = np.random.rand(self.Ndim, self.Nhidden)\n self.c = np.random.rand(self.Nhidden)\n self.W = np.random.randn(self.Nhidden, self.Ndim) * sig\n self.alphas = np.zeros((self.Ndim, self.Ncomponents))\n self.mus = np.zeros((self.Ndim, self.Ncomponents))\n self.sigmas = np.zeros((self.Ndim, self.Ncomponents))\n self.optimize_params = [self.rhos, self.c, self.W]\n\n types = ['alpha', 'mu', 'sigma']\n self.bs = {}\n self.Vs = {}\n for t in types:\n self.bs[t] = np.random.randn(self.Ndim, self.Ncomponents) * sig\n self.Vs[t] = np.random.randn(self.Ndim, self.Nhidden,\n self.Ncomponents) * sig\n self.optimize_params.append(self.bs[t])\n self.optimize_params.append(self.Vs[t])", "def _get_options(self):\n return self.options", "def 
params(self):\n return {'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'n_kernel': self.n_kernel,\n 'padding': self.padding,\n 'act_fn': self.act_fn,\n 'output_shape': self.output_shape,\n 'w_init_fn': self.w_init_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params", "def get_params(self):\n return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}", "def _opt_config(self):\n return self._opt_method.config", "def create_initial_parameters(self):\n update_nested_dictionary(\n self.settings,\n {self.highest_lookup: {\n self.highest_sublookup: self.kw\n }})", "def get_recipe_params(self):\n return self.recipe_settings.get('params')", "def params(self):\n return {'out_dim': self.out_dim,\n 'act_fn': self.act_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def get_params(self):\n return {'classifier': self.classifier,\n 'grid_param': self.grid_param,\n 'n_param_comb': self.n_param_comb,\n 'top_bagging': self.bagging,\n 'bagging_param': self.bagging_param,\n 'comb_seed': self.comb_seed}" ]
[ "0.67133015", "0.6513982", "0.64959013", "0.64958394", "0.644193", "0.644193", "0.64085335", "0.6405476", "0.6403037", "0.63784325", "0.63532674", "0.63532674", "0.63532674", "0.63532674", "0.6348272", "0.6348272", "0.6348272", "0.6348272", "0.6346435", "0.6290466", "0.62406987", "0.6235463", "0.61874795", "0.6179658", "0.6179658", "0.61690176", "0.6144498", "0.6127659", "0.6100183", "0.6099213", "0.60024166", "0.5981427", "0.59756935", "0.59740895", "0.59695476", "0.5943465", "0.5943465", "0.59335357", "0.59237605", "0.59237605", "0.59237605", "0.5914827", "0.5914827", "0.5914827", "0.5909921", "0.590663", "0.5898709", "0.58973664", "0.5897294", "0.5892303", "0.5876542", "0.5871906", "0.5870479", "0.5869639", "0.5868546", "0.58623654", "0.5827594", "0.58160186", "0.5808464", "0.5794341", "0.5792974", "0.57897836", "0.57710165", "0.5760049", "0.57558143", "0.5754526", "0.57403755", "0.57216996", "0.572161", "0.57024556", "0.5701006", "0.5699889", "0.56818247", "0.5678791", "0.56735605", "0.56713885", "0.56713885", "0.56713885", "0.5660551", "0.5652866", "0.563679", "0.5613381", "0.56068367", "0.56062454", "0.5604519", "0.5602595", "0.56016135", "0.55981946", "0.5581851", "0.5545409", "0.55413413", "0.5533438", "0.5533008", "0.5531006", "0.5526959", "0.5524576", "0.55159366", "0.5506489", "0.5498352", "0.5485236" ]
0.87101316
0
Gets the current optuna_verbose
def _get_optuna_verbose(self):
        self._validate_optuna_verbose()
        # use only for mode Optuna
        if self._get_mode() != "Optuna":
            return True
        return deepcopy(self.optuna_verbose)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def _get_verbose(self):\n self._validate_verbose()\n return deepcopy(self.verbose)", "def verbose(self):\n return self.conf.get(\"verbose\")", "def verbose():\n return _verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def verbose(self):\n return self._verbose", "def getVerbose(self):\n return self.__VERBOSE", "def verbose(self):\n\n return self._verbose", "def isVerbose(self):\n return self.opts.verbose", "def _verbose(self):\n return self._toBool(os.environ.get('VERBOSE', 0))", "def is_verbose():\n return g_verbose", "def verbose( self ):\n return Verbose.__level", "def verbose():\n return Verbose.level()", "def is_verbose() -> bool:\n return VERBOSE", "def verbosity(self):\n return self._get('verbosity')", "def verbose(self):\n enabled = self.lib.iperf_get_verbose(self._test)\n\n if enabled:\n self._verbose = True\n else:\n self._verbose = False\n\n return self._verbose", "def verbosity(self):\n return self._verbosity", "def verbose_str(self):\n return self.summary.verbose(self.results) or ''", "def verbose(self):\n verbose = self.__class__.__name__ + \", alpha: \" + str(self.alpha)\n return verbose", "def verbose():\n GLOBAL['VERBOSE'] = True", "def verbosePref(self):\n # If the level of the object is below the Preference level,\n # recursively calls base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(VERBOSE_PREF, self._verbose_pref.level)[0]", "def enable_verbose(self):\n self.verbose = True", "def verbose(value=None):\n global verbosity\n\n if value != None:\n verbosity = value\n \n try:\n rval = verbosity\n except NameError:\n verbosity = False\n rval = verbosity\n\n return rval", "def _set_verbose(value):\n global VERBOSE\n VERBOSE = value", "def gnupg_verbose():\n if LOGGER.getEffectiveLevel() == logging.DEBUG:\n return [\"--verbose\"]\n\n return [\"-q\"]", "def verbosity_for_session(request):\n return request.config.getoption(\"--verbosity-project\")", "def tunnel1_log_options(self) -> pulumi.Output['outputs.VpnConnectionTunnel1LogOptions']:\n return pulumi.get(self, \"tunnel1_log_options\")", "def option(self):\r\n return conf.lib.clang_getDiagnosticOption(self, None)", "def set_verbose(self, v):\n self._verbose = bool(v)", "def setVerbose(newVal):\n global verbose\n verbose = newVal", "def __read_verbose_param(self, context):\n self.__verbose = False\n if context.args and context.args[0] in \"verboseVERBOSE\":\n self.__verbose = True", "def logging_verbosity(self):\n\n return self.get_raw(\"logging_verbosity\")", "def verbose(self, enabled=True):\r\n self.verbose = verbose", "def setVerbose(self, v):\n return self._set(verbose=v)", "def SetVerbose(new_verbose=True):\n global _verbose\n _verbose = new_verbose", "def localStageOutOption(self):\n return self.localStageOut['option']", "def verbose(self, value):\n if value > self.DEBUG:\n value = self.DEBUG\n if value < self.NONE:\n value = self.NONE\n self._verbose = value", "def _ansible_verbose(verbose_level=1):\n flag = ''\n if verbose_level > 1:\n flag = f'-{\"v\" * (verbose_level - 1)}'\n return flag", "def parse_verbose_option(ctx: click.Context, param: click.Parameter, value: Optional[bool]) -> None:\n if value:\n logger = container.logger()\n logger.debug_logging_enabled = True", "def isVerboseDebug(self):\n self.edLogging.isVerboseDebug()", "def verbose(obj, msg):\n return obj.verbose(msg)", "def setVerbose(self, value):\n return self._set(verbose=value)", "def setVerbose(self, 
value):\n return self._set(verbose=value)", "def print_if_verbose(self, log):\n\n if self.verbose:\n print(log)\n return log", "def help_opt(self):\n print(OPTIONS)", "def verbose ( self , message , *args , **kwargs ) :\n return self.logger.verbose ( message , *args , **kwargs )", "def tunnel1_log_options(self) -> Optional[pulumi.Input['VpnConnectionTunnel1LogOptionsArgs']]:\n return pulumi.get(self, \"tunnel1_log_options\")", "def tunnel1_log_options(self) -> Optional[pulumi.Input['VpnConnectionTunnel1LogOptionsArgs']]:\n return pulumi.get(self, \"tunnel1_log_options\")", "def Verbose(on_off=\"on\"):\n\n global verbose\n \n if on_off.isdigit():\n int_value = int(on_off)\n else:\n int_value = 1\n\n if on_off.lower() == \"off\":\n int_value = 0\n print \"Verbose disabled.\"\n elif on_off.lower() == \"on\":\n int_value = 1\n print \"Verbose enabled.\"\n \n if -1 < int_value < 3:\n verbose=int_value\n interface.VERBOSE=int_value\n else:\n raise TypeError", "def do_verbose(self, arg):\n global verbose\n if verbose == 1:\n verbose = 0\n # prtin and add to log file \n logmsg = \" INFO: verbose mode disable\"\n log(logmsg)\n else:\n verbose = 1\n # prtin and add to log file \n logmsg = \" INFO: verbose mode enable\"\n log(logmsg)", "def verbose(self, state):\n self._verbose = state", "def get_verbose_name(self):\n return self.verbose_name or self.__class__.__name__", "def known_verbose_name():\n return 'test Verbose name'", "def _do_set_verbose(self, args):\r\n verbose = int(args[1])\r\n self.server.set_verbose(verbose)\r\n return \"%d\" % verbose", "def v_action(option,opt_str,value,parser):\n cmdline_main.message(\"Enabling verbose message output.\")\n if hasattr(parameterized,'get_logger'):\n parameterized.get_logger().setLevel(parameterized.VERBOSE)\n else: # For versions of the param package before 9 May 2013\n parameterized.min_print_level=parameterized.VERBOSE", "def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v", "def opt_value(self):\n return self._opt_value", "def verbose(self, verbose):\n self._verbose = verbose", "def get_short_flag(self):\n return self.short_flag", "def _verbose(self,text):\n if self.verbose:\n print(text)", "def optioninfo(self, option):\n return self._moptions[option]", "def GetVerbosityLevel(self):\n if self.verbose and self.simulation_verbose:\n\n return 2\n\n elif self.verbose and not self.simulation_verbose:\n\n return 1\n\n elif not self.verbose and self.simulation_verbose:\n\n return 1\n\n else:\n\n return 0", "def get_display_options(verbose=False):\n if verbose:\n pprint(display_options)\n return display_options", "def toggleVerbose(self):\n self.__VERBOSE = not self.__VERBOSE", "def is_verbose_log_enabled(self):\n\t\treturn bool(call_sdk_function('PrlDispCfg_IsVerboseLogEnabled', self.handle))", "def setVerboseOn(self):\n self.edLogging.setVerboseOn()", "def verbose_log_link(self) -> str:\n return pulumi.get(self, \"verbose_log_link\")", "def set_verbose(verbosity: bool) -> None:\n global VERBOSE # pylint: disable=global-statement\n VERBOSE = verbosity", "def OnSim42Verbose(self, event):\n self.verbose = event.IsChecked()\n if self.verbose:\n cb = ShellInfoCallBack(self.sim42interp)\n self.sim42interp.cmd.SetInfoCallBack(cb)\n else:\n self.sim42interp.cmd.SetInfoCallBack(self.sim42interp.cmd.infoCallBack)", "def unittest_verbosity():\n frame = inspect.currentframe()\n while frame:\n self = frame.f_locals.get(\"self\")\n if isinstance(self, unittest.TestProgram):\n return self.verbosity\n frame = frame.f_back\n return 0", 
"def get_level(self):\n return self.debug_level, self.verbosity", "def info(self):\n import tc\n ## enumerate all options\n opts = self.to_list()\n res = \"\"\n fmt = \"%20s = %5s ## %s\\n\"\n\n for k, v in opts:\n res += fmt % (k, str(self.__getattribute__(k)),\n str(v.doc()).split('\\n')[0])\n\n return res", "def debug(self):\n return self.settings['debug']", "def get_opt(self):\n return self.parser.parse_args()", "def get_opt(self):\n return self.parser.parse_args()", "def get_opt(self):\n return self.parser.parse_args()", "def set_verbose(self, verbose):\n self._verbose = verbose", "def print_verbose(args, msg):\n if args.verbose:\n print(msg)", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def setVerboseLevel(self,verbose):\n\tself.verbose=verbose\n\tif self.verbose and self.dbType=='sqlite':\n\t print \"db isolation\",self.db.isolation_level", "def get_verbosity_level():\n try:\n level = rcp.get(\"verbosity\",\"level\").upper()\n return convert_logging_level(level)\n except:\n logging.warning(\"[verbosity] section of the config malformed.\")\n return False", "def verbosePref(self, setting):\n self.set_preference(candidate_info=setting, pref_ivar_name=VERBOSE_PREF)", "def setVerbose(*args,**kwargs):\n verbose = args[0] if args else True\n if verbose:\n verbositySampleTools = 2\n verbosityPlotTools = 2\n verbosityVariableTools = 2\n verbositySelectionTools = 2\n verbosityWJ = 2", "def tunnel2_log_options(self) -> pulumi.Output['outputs.VpnConnectionTunnel2LogOptions']:\n return pulumi.get(self, \"tunnel2_log_options\")", "def verbose_logger():\n lumigo_utils.get_logger().setLevel(logging.DEBUG)\n lumigo_utils.config(should_report=False, verbose=True)", "def get_options(self):\n\t\treturn self.options", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def test_valid_verbose(verbose: Any) -> None:\n check_verbose(verbose)", "def _get_explain_level(self):\n self._validate_explain_level()\n if self.explain_level == \"auto\":\n if self._get_mode() == \"Explain\":\n return 2\n if self._get_mode() == \"Perform\":\n return 1\n if self._get_mode() == \"Compete\":\n return 0\n if self._get_mode() == \"Optuna\":\n return 0\n else:\n return deepcopy(self.explain_level)", "def options(self):\r\n return self._options", "def get_current_option(self) -> str:\n return self.options[self.current_option_index]", "def set_verbose(self, verbose):\n self._shared.set_verbose(verbose)", "def vv_flag():\n log.setLevel(logging.DEBUG)", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def _opt_config(self):\n return self._opt_method.config", "def _get_options(self):\n return self.options", "def options(self):\n return self.__options", "def turn_on_verbosity(self):\n self.m.setParam('OutputFlag', 1)" ]
[ "0.78699535", "0.7726708", "0.7537154", "0.746297", "0.746297", "0.746297", "0.746297", "0.7356259", "0.73098314", "0.7288619", "0.71457535", "0.70966977", "0.7072135", "0.70582813", "0.66449106", "0.65642446", "0.65546435", "0.650735", "0.6440845", "0.6396217", "0.63302183", "0.6306379", "0.6233047", "0.6164169", "0.6089423", "0.607687", "0.60681003", "0.6021105", "0.6017878", "0.599446", "0.5985038", "0.5967615", "0.5962597", "0.5838324", "0.58104366", "0.5798574", "0.5796168", "0.57713026", "0.57590777", "0.5755264", "0.56869185", "0.5674847", "0.5666638", "0.5666638", "0.5649091", "0.5578368", "0.5575668", "0.5544314", "0.5544314", "0.5534656", "0.55160254", "0.5515787", "0.5488114", "0.5484427", "0.54816574", "0.5472888", "0.5444445", "0.54392594", "0.54239863", "0.5423426", "0.5413982", "0.5413963", "0.54098594", "0.5395899", "0.53575814", "0.534526", "0.5315901", "0.5303838", "0.5290439", "0.52792454", "0.52734745", "0.52643055", "0.52559924", "0.52554864", "0.5241087", "0.5241087", "0.5241087", "0.5240746", "0.52280635", "0.5224686", "0.52237976", "0.52106875", "0.52036566", "0.5192621", "0.5165171", "0.5162613", "0.5134555", "0.5126958", "0.51215297", "0.51189345", "0.51181793", "0.51082104", "0.5100449", "0.50995064", "0.5092795", "0.5092795", "0.50860655", "0.50818175", "0.5078998", "0.5074223" ]
0.8742914
0
Gets the current n_jobs
def _get_n_jobs(self):
        self._validate_n_jobs()
        return deepcopy(self.n_jobs)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def get_n_jobs(self):\n return self.n_jobs", "def effective_n_jobs(n_jobs=-1):\n if n_jobs == 1:\n return 1\n\n backend, backend_n_jobs = get_active_backend()\n if n_jobs is None:\n n_jobs = backend_n_jobs\n return backend.effective_n_jobs(n_jobs=n_jobs)", "def get_num_jobs(self):\n return str(self.num_jobs)", "def num_worker(self):\n return self.config.get(\"jobs\", 4)", "def _get_njobs_in_queue(self, username):", "def num_jobs(self):\n return self.jobs.qsize()", "def jobs(self):\n return self.get_jobs()", "def check_n_jobs(n_jobs):\n # scikit-learn convention\n # https://scikit-learn.org/stable/glossary.html#term-n-jobs\n if n_jobs is None:\n return 1\n elif not is_int(n_jobs):\n raise ValueError(f\"`n_jobs` must be None or an integer, but found: {n_jobs}\")\n elif n_jobs < 0:\n return os.cpu_count()\n else:\n return n_jobs", "def __number_of_jobs__(self):\n # | - __number_of_jobs__\n num_jobs = 0\n\n # Regular jobs\n if self.job_var_lst is not None:\n num_jobs = len(self.job_var_lst)\n\n # Individual dir jobs\n if self.indiv_dir_lst is not None:\n num_jobs += len(self.indiv_dir_lst)\n\n\n return(num_jobs)\n # __|", "def jobs(self):\n return self._jobs", "def jobserver_running_jobs():\n\n if _MakeJobServer._singleton is None:\n return '?'\n\n try:\n buf = array.array('i', [0])\n if fcntl.ioctl(_MakeJobServer._singleton.job_pipe[0], FIONREAD, buf) == 0:\n return _MakeJobServer._singleton.num_jobs - buf[0]\n except NotImplementedError:\n pass\n except OSError:\n pass\n\n return _MakeJobServer._singleton.num_jobs", "def check_n_jobs(n_jobs):\n if n_jobs is None:\n return 1\n elif not is_int(n_jobs):\n raise ValueError(f\"`n_jobs` must be None or an integer, but found: {n_jobs}\")\n elif n_jobs < 0:\n return cpu_count() + n_jobs + 1\n else:\n return min(n_jobs,cpu_count())", "def numSubmitted(self):\n return len(self.__submittedJobs)", "def set_n_jobs(self, new_n_jobs=None):\n self.n_jobs = new_n_jobs", "def jobserver_max_jobs():\n\n if _MakeJobServer._singleton is not None:\n return _MakeJobServer._singleton.num_jobs\n else:\n return 0", "def jobs(self):\n raise NotImplementedError()", "def nworkers(self):\n return len(self._workers)", "def getnoofjobs(self):\n select_noofjobs = (\n \"select count(*) from public.jobs where latestjobversion=True \"\n \"and insertdate between Date(%s) and Date(%s) \"\n \"and (username not in (%s))\"\n )\n\n\n\n self.pgcursor.execute(select_noofjobs, (self.startdate, self.enddate, self.adminusers))\n\n noofjobs = 0\n count = self.pgcursor.fetchone()\n if count is not None:\n noofjobs = count[0]\n\n # print(str.format(\"total no of jobs: {0}\", noofjobs))\n return noofjobs", "def get_waiting_jobs(self):\n return []", "def __init__(self, n_jobs=1, verbose=True):\n self.n_jobs = n_jobs\n self.verbose = verbose", "def count(self):\n # no auth?\n return self.app.db.jobs.count()", "def get_njobs_in_queue(self, username=None):\n if username is None: username = getpass.getuser()\n njobs, process = self._get_njobs_in_queue(username=username)\n\n if process is not None and process.returncode != 0:\n # there's a problem talking to squeue server?\n err_msg = ('Error trying to get the number of jobs in the queue' +\n 'The error response reads:\\n {}'.format(process.stderr.read()))\n logger.critical(err_msg)\n\n if not isinstance(self, ShellAdapter):\n logger.info('The number of jobs currently in the queue is: {}'.format(njobs))\n\n return njobs", "def n_worker(self):\n return self.redis.pubsub_numsub(MSG)[0][-1]", "def _check_n_jobs(n_jobs):\n _check_type(n_jobs, 
(\"int\",), \"n_jobs\")\n if n_jobs <= 0:\n n_cores = mp.cpu_count()\n n_jobs_orig = n_jobs\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError(\n f\"If n_jobs has a non-positive value ({n_jobs_orig}), it must \"\n f\"not be less than the number of CPUs present ({n_cores}).\"\n )\n return n_jobs", "def get_ncores(self):\n return self._ncores", "def num_workers(self):\n return self._num_workers", "def getWorkers(self):\n return self.workers", "def get_n_workers(self):\n return self.df.worker.nunique()", "def get_num_parallel_workers():\n return _config.get_num_parallel_workers()", "def getJobID(self):\n return self.__nupicJobID", "def get_job_list(self):\n return self.job_list", "def get_job_list(self):\n return self.job_list", "def latest_job(self):\n return self.jobmanagers[self.current_network].latest_job", "def get_jobs(self):\n return list(self._jobs.values())", "async def get_jobs(): \n return mngr.getAllJobs()", "def job_info(self):\n def _sortkey(x):\n return x['job_name']\n\n resp = self._cmd(uri = '/jenkins_jobs')\n jobs = resp.get('jobs', [])\n return sorted(jobs, key=_sortkey)", "def info(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s\" % (self.sessionid,\n self.name))\n return self.server.json_body(resp)", "def get_job_names(self):\n return []", "def current_job(self):\n assert(ExecutorThread.executor_object is not None)\n return self.__job", "def test_valid_n_jobs(n_jobs: Any) -> None:\n check_n_jobs(n_jobs)", "def n_in_progress(self) -> int:\n return self.n_tasks() - self.qsize()", "def set_num_jobs(self, num):\n self.num_jobs = num", "def launch_status(self):\n print(\n f\"Starting job with {len(self.fe.get_network())} jobs total. \",\n end=\"\\r\",\n )", "def list_jobs(arn=None, nextToken=None):\n pass", "def _get_jobs():\n return _get_bigquery_service().jobs()", "def getNode(self):\r\n try:\r\n output,error = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()\r\n if self.jobId in output:\r\n return output.split(\"\\t\")[7]\r\n if len(error) > 0:\r\n logging.error(error)\r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def get_jobs_id(self, ti) -> None:\n return self.get_hook().get_jobs_id(ti)", "def nThreads(self):\n return self._c_param.n_threads", "def getCondorRunningJobs(user = None):\n if not user:\n user = getpass.getuser()\n\n command = ['condor_q', user]\n pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)\n stdout, error = pipe.communicate()\n\n output = stdout.split('\\n')[-2]\n\n nJobs = int(output.split(';')[0].split()[0])\n\n return nJobs", "def Length(self):\n return len(self.jobQueue)", "def next_job_run(self):\n return self._data.get('next_job_run')", "def get_jobs_connection(self):\n return self.m_connection.jobs", "def name(self):\n return self._job", "def getThreads():\r\n return multiprocessing.cpu_count()", "def running_jobs():\n dirs = [os.path.join(RESULTS_DIR, dir_)\n for dir_ in os.listdir(RESULTS_DIR)\n if os.path.isdir(os.path.join(RESULTS_DIR, dir_))]\n active_jobs = len(dirs)\n for f in dirs:\n if 'eplusout.end\\n' in os.listdir(os.path.join(RESULTS_DIR, f)):\n active_jobs -= 1\n return active_jobs", "def get_executed_jobs(self):\n with self.__lock:\n return list(self.__executed_jobs)", "def get_list_size(self):\n\n return self.r.llen(self.joblist)", "def job(self) -> str:\n return self._job", "def job(self) -> str:\n return self._job", "def num_workers(self) -> int:\n return 
sum(self.client.nthreads().values())", "def n_jobs_scaler(n_features=_default_nfeatures):\n if n_features is None:\n raise ValueError\n\n return int(min(4, math.ceil(n_features / 2000.)))", "def workers(self):\n return self.worker_list", "def get_nb_results(self):\n return self.nb_results", "def job(self):\n return self.batch[self.job_id]", "def job_ids(self):\n return self.get_job_ids()", "def get_jobs(self, age=1):\n jobs_for_reaper = []\n try: \n api_response = self.kube_v1_batch_client.list_namespaced_job(namespace=self.project, label_selector='job-origin=pman', include_uninitialized=True)\n for item in api_response.items:\n # Checking if job has finished running, either failed or succeeded\n if item.status.conditions and (item.status.failed or item.status.succeeded):\n # Using start_time because failed jobs have no completion_time\n start_time = item.status.start_time\n current_time = datetime.datetime.now(datetime.timezone.utc)\n diff = current_time-start_time\n # 86400 = number of seconds in a day. \"divmod\" returns quotient and remainder as tuple e.g (1, 5.74943)\n # means 1 day and 5.74943 sec have passed between current_time and start_time of the job\n diff_in_seconds = divmod(diff.total_seconds(), 86400)\n if diff_in_seconds[0] >= 1:\n jobs_for_reaper.append(item.metadata.name)\n \n except ApiException as e:\n print(\"Exception when calling BatchV1Api->list_namespaced_job: %s\\n\" % e)\n exit(1)\n return jobs_for_reaper", "def get_jobs(bmc, only_unfinished):\n jobs = bmc.list_jobs(only_unfinished)\n return namedtuples_to_dicts(jobs)", "def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "def check_n_jobs(n_jobs, allow_cuda=False):\n if not isinstance(n_jobs, int):\n if not allow_cuda:\n raise ValueError('n_jobs must be an integer')\n elif not isinstance(n_jobs, string_types) or n_jobs != 'cuda':\n raise ValueError('n_jobs must be an integer, or \"cuda\"')\n # else, we have n_jobs='cuda' and this is okay, so do nothing\n elif _force_serial:\n n_jobs = 1\n logger.info('... MNE_FORCE_SERIAL set. Processing in forced '\n 'serial mode.')\n elif n_jobs <= 0:\n try:\n import multiprocessing\n n_cores = multiprocessing.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError('If n_jobs has a negative value it must not '\n 'be less than the number of CPUs present. '\n 'You\\'ve got %s CPUs' % n_cores)\n except ImportError:\n # only warn if they tried to use something other than 1 job\n if n_jobs != 1:\n warn('multiprocessing not installed. 
Cannot run in parallel.')\n n_jobs = 1\n\n return n_jobs", "def get_number_executors(self):\n with self.__threads_lock:\n return self.__number_executors", "def omp_threads(self):\n if self.has_omp:\n return self.omp_env[\"OMP_NUM_THREADS\"]\n else:\n return 1", "def genJobList():\n nit=10\n reply=[]\n while len(reply)<10: #assume qstat fails if less that 10 jobs on cluster\n reply=chomp(os.popen('qstat|expand|tr -s \\' \\'|cut -d\\' \\' -f 1,2,5').readlines())\n nit+=1\n if nit>10: break\n return reply", "def get_job(self) -> Job:\n return self.jobs_list[self.sel_idx]", "def test_n_jobs(self):\n for n_jobs in [1, 6]:\n with self.subTest(input='list', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data)))\n\n with self.subTest(input='numpy', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data_numpy)))", "def get_running_condor_jobs(self):\n return Utils.condor_q(selection_pairs=[[\"taskname\",self.unique_name]], extra_columns=[\"jobnum\"])", "def determine_number_of_jobs(\n parallel=False, command_line=None, config_default=None, max_cpus=None\n):\n if not parallel:\n return 1\n\n if command_line is None and \"command_line\" in spack.config.scopes():\n command_line = spack.config.get(\"config:build_jobs\", scope=\"command_line\")\n\n if command_line is not None:\n return command_line\n\n max_cpus = max_cpus or cpus_available()\n\n # in some rare cases _builtin config may not be set, so default to max 16\n config_default = config_default or spack.config.get(\"config:build_jobs\", 16)\n\n return min(max_cpus, config_default)", "def maximum_number_of_workers(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def num_cores(self):\n return self.mpi_procs * self.omp_threads", "def give_workers_list(self):\n return self._workers", "def get_jobs_in_queue() -> List[int]:\n output = subprocess.check_output([\"qstat\"]).decode().splitlines()\n job_ids = []\n for line in output:\n m = REGEX_QSTAT.match(line)\n if m:\n job_ids.append(int(m.group(1)))\n return job_ids", "def get_completed_incore_jobs(self):\n self.completed_incore_jobs = list()\n for label, job_names in self.running_jobs.items():\n for job_name in job_names:\n i = get_i_from_job_name(job_name)\n if i is None:\n job_type = '_'.join(job_name.split('_')[:-1]) # Consider job types such as 'directed_scan'.\n job = self.job_dict[label][job_type][job_name]\n elif 'conformer' in job_name:\n job = self.job_dict[label]['conformers'][i]\n elif 'tsg' in job_name:\n job = self.job_dict[label]['tsg'][i]\n else:\n raise ValueError(f'Did not recognize job {job_name} of species {label}.')\n if job.execution_type == 'incore' and job.job_status[0] == 'done':\n self.completed_incore_jobs.append(job.job_id)", "def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()", "def get_job_status(self):\n if self.worker_thread is None:\n return None\n else:\n return self.worker_thread.get_status()", "def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1", "def update_cores(self):\n num_cores = 0\n for job in self.job_list:\n num_cores += int(job.get_core_info())\n self.cores_used = num_cores\n return", "def getFailedJobs(self):\n return self.__failedJobs", "def running_jobs_sherlock():\n user = os.environ['USER']\n\n 
return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]", "def number_of_workers():\n return (cpu_count() * 2) + 1", "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "def __len__(self):\n return self.nb_iterations", "def get_jobs_grid_row_count(self):\n jobs_grid_count_span_element = self.wait().until(EC.presence_of_element_located(self.jobs_grid_count_span_locator))\n span_text = jobs_grid_count_span_element.text\n split_text = span_text.split()\n return split_text[2]", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def get_jobs(dumpruninfo):\n if \"jobs\" not in dumpruninfo:\n return []\n return dumpruninfo[\"jobs\"].keys()", "def get_waiting_jobs(self):\n open_jobs = []\n with closing(self._conn.cursor()) as cursor:\n for row in cursor.execute( \"select job_name, job_version from jobs where job_state in ('\"\n + JobState.WAITING.value + \"','\" + JobState.WAITING_PRED.value + \"','\" + JobState.RUNNING.value +\"')\"):\n open_jobs.append((row[0], row[1]))\n return open_jobs", "def get_jobs(self, type = None):\n joblist = JobList()\n for jobs in self.sm.get_jobs(type = type):\n joblist.add_job(jobs['identifier'], jobs['phase'])\n return joblist.tostring()", "def _get_grid_jobs():\n output = _general_qstat()\n if not output:\n return None\n tree = ElementTree.fromstring(output)\n jobs = []\n for job in tree.iter('job_list'):\n job_number = job[0].text\n output = subprocess.check_output(\"qstat -j %s -xml\" % job[0].text, shell=True)\n job_tree = ElementTree.fromstring(output)[0][0] # First index is djob_info, second is element\n time_str = _get_job_tree_text(job_tree, \"JB_submission_time\")\n try:\n start_time = int(job_tree.find(\"JB_ja_tasks\")[0].find(\"JAT_start_time\").text)\n except (TypeError, AttributeError):\n # TypeError if JB_ja_tasks not in the tree (which will happen if not started)\n # AttributeError if JAT_start_time not in the subtree\n start_time = 0\n jobs.append({\n \"job_number\": int(job_number),\n \"script\": _get_job_tree_text(job_tree, \"JB_script_file\"),\n \"submission_time\": int(time_str) if time_str else 0,\n \"start_time\": start_time,\n \"cwd\": _get_job_tree_text(job_tree, \"JB_cwd\"),\n })\n return jobs", "def job_name(self):\n return self._stub.List(self._message).job_name" ]
[ "0.8941863", "0.80259633", "0.79868776", "0.78013074", "0.78005415", "0.7614501", "0.72230357", "0.71267", "0.70833606", "0.703076", "0.69810003", "0.69787186", "0.6843887", "0.6817876", "0.674785", "0.666808", "0.66524726", "0.6553903", "0.6538402", "0.65348506", "0.6515888", "0.651586", "0.6515622", "0.6509251", "0.64978224", "0.6491501", "0.64796466", "0.6465163", "0.64496046", "0.6389787", "0.6353111", "0.6353111", "0.63250446", "0.63089734", "0.6303203", "0.6301963", "0.63016015", "0.6275562", "0.6244756", "0.6241777", "0.6231969", "0.62244904", "0.6214536", "0.6151845", "0.6132509", "0.6124753", "0.61002856", "0.6098968", "0.6073416", "0.6064939", "0.6027036", "0.6025347", "0.6018521", "0.6017683", "0.6003514", "0.60023797", "0.5999034", "0.59946406", "0.59946406", "0.59780055", "0.5972887", "0.5967418", "0.59632367", "0.5946295", "0.5945192", "0.594035", "0.59393466", "0.59379", "0.5935984", "0.5928768", "0.5922473", "0.59080017", "0.5896903", "0.5891935", "0.58813894", "0.5871719", "0.58701104", "0.5859943", "0.58581084", "0.58557844", "0.5855045", "0.5851776", "0.58452976", "0.584064", "0.5840563", "0.58338416", "0.58306456", "0.58267444", "0.582051", "0.58154094", "0.5798652", "0.5798001", "0.5783708", "0.5783708", "0.5783708", "0.57816476", "0.5778693", "0.5775401", "0.57705337", "0.5766443" ]
0.84748805
1
Gets the current random_state
def _get_random_state(self):
    self._validate_random_state()
    return deepcopy(self.random_state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rand(self):\n return self.State.rand()", "def getstate(self):\n return (self.baseseed, self.counter, self.randbits_remaining)", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return 
self.__state", "def _get_state(self):\n return self.__state", "def get_state(self):\n return self.env.sim.get_state()", "def exposed_get_state(self):\n return json.dumps(dict(state=random.choice(self.states)), indent=2)", "def get_state(self):\n return self.state", "def get_state(self):\n return self.state", "def get_new_state():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))\n return state", "def _random_function(self, random_state):\n return random_state.rand", "def get_state(self):\n pass", "def random_state(state):\n old_state = RandomState()\n state.set_global()\n yield\n old_state.set_global()", "def GetState(self):\r\n \r\n return self.state", "def get_current_state(self):\n return self.world.get_state()", "def get_current_state(self):\n return self.game.get_current_state()", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def return_state(self):\n\t\treturn self.state", "def get_game_state(self):\n return self._current_state", "def get_state(self, state):\n return state", "def state(self):\n return self.get_state()", "def random_agent(self, state):\n\t\trndint = random.randint\n\t\treturn self.state[state][rndint(0, len(self.state[state]))]", "def get_random_start_state(self) -> State:\n if len(self.blocks) <= state_enumeration_limit:\n rnd = random.randint(0, len(self.allStates) - 1)\n return self.allStates[rnd]\n else:\n return self.generate_random_start_state()", "def get_current_state(self):\n return self._current_state", "def get_state(self):\n raise NotImplementedError", "def get_state(self):\n raise NotImplementedError" ]
[ "0.8123552", "0.7858806", "0.7808915", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.7497387", "0.74199545", "0.7412689", "0.7391705", "0.7391705", "0.7376774", "0.73502827", "0.7347904", "0.7336564", "0.732288", "0.7321669", "0.7304804", "0.7288502", "0.7288502", "0.7288502", "0.7288502", "0.7288502", "0.7269629", "0.72684383", "0.723892", "0.71439147", "0.7143449", "0.7123854", "0.71099514", "0.7104234", "0.7104234" ]
0.8489528
0
Gets the fairness metric
def _get_fairness_metric(self):
    self._validate_fairness_metric()
    if self.fairness_metric == "auto":
        if self._get_ml_task() == BINARY_CLASSIFICATION:
            return "demographic_parity_ratio"
        if self._get_ml_task() == REGRESSION:
            return "group_loss_ratio"
        if self._get_ml_task() == MULTICLASS_CLASSIFICATION:
            return "demographic_parity_ratio"
    else:
        return deepcopy(self.fairness_metric)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_fairness_threshold(self):\n if self.fairness_threshold == \"auto\":\n if self._get_ml_task() in [\n BINARY_CLASSIFICATION,\n MULTICLASS_CLASSIFICATION,\n ]:\n thresholds = {\n \"demographic_parity_difference\": 0.1,\n \"demographic_parity_ratio\": 0.8,\n \"equalized_odds_difference\": 0.1,\n \"equalized_odds_ratio\": 0.8,\n }\n return thresholds.get(self._fairness_metric, 0.8)\n elif self._get_ml_task() == REGRESSION:\n thresholds = {\n \"group_loss_ratio\": 0.8,\n }\n if self._fairness_metric == \"group_loss_difference\":\n raise AutoMLException(\n \"We can't set default fairness threshold value. Please set `fairness_threshold` value in AutoML constructor.\"\n )\n return thresholds.get(self._fairness_metric, 0.8)\n else:\n return deepcopy(self.fairness_threshold)", "def is_fair(self):\n fairness = Fairness(experience_weight=1)\n if fairness.is_fair(self):\n return 'This trade is fair!'\n return 'This trade is unfair!'", "def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def f(ns, k):\n ns = list(enumerate(sorted(ns)))\n n = len(ns)\n min_u = unfair(ns[:k])\n nns = ns[k:]\n def g(mu, n):\n (i, n) = n\n unfairness = n - ns[i - k + 1][1]\n # print(i, n, unfairness)\n if unfairness < mu:\n return unfairness\n return mu\n return reduce(g, nns, min_u)", "def fairness_discrepancy(data, n_classes, norm=0):\n unique, freq = np.unique(data, return_counts=True)\n props = freq / len(data) #Proportion of data that belongs to that data\n \n #------------------Modification to correct the zero support problem------------------------------------------------\n temp=np.zeros(n_classes)\n temp[unique]=props\n props=temp\n 
#------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n l2_fair_d = np.sqrt(((props - truth)**2).sum())/n_classes\n l1_fair_d = abs(props - truth).sum()/n_classes\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes)/n_classes \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n #Create array to populate proportions\n # props2=np.zeros(n_classes)\n # props2[unique]=props\n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n for i in props:\n f.write(\"%f \"%(i))\n f.write(\"\\n\")\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity,wd/metric_max(n_classes,\"wd\"),wds/metric_max(n_classes,\"wds\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 + 14 + 16", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence", "def sampling_priority(self):\n # type: () -> Optional[NumericType]\n return self._metrics.get(SAMPLING_PRIORITY_KEY)", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def get_score(self):\r\n return None", "def get_serendipity_val(dic, key):\n # The key was in the training set\n try:\n return dic[key]\n # The key wasn't in the training set, then the serendipity is 1\n except KeyError:\n return 1.0", "def get_metric(self) -> mt.Metric:\n return mt.BinaryAccuracy()", "def best_metric(self) -> float:\n return self._best_metric", "def max_staleness(self) -> str:\n return pulumi.get(self, \"max_staleness\")", "def coherence(self):\r\n return np.abs(self.coherency) ** 2", "def calc_fair_profit(self, assignment):\n fair_profit = {t:0 for t in self.tasks}\n for agent, tasks in assignment.items():\n for task in tasks:\n fair_profit[task] += self.profit(agent, task)\n return min(fair_profit.values())", "def worst_score(self):\r\n pass", "def coherency(self):\r\n coherency = tsa.cache_to_coherency(self.cache, self.ij)\r\n\r\n return coherency", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def get_performance(self):\n if self.skip_reference:\n return self.compare_sim.tps\n\n 
# Avoid divide by zero errors when the simulation is not executed.\n if self.reference_sim.tps == 0:\n return 0\n\n t0 = 1 / self.reference_sim.tps\n t1 = 1 / self.compare_sim.tps\n return 1 / (t1 - t0)", "def performance_measure(self, x):\n # \"calculate performance measure\" \n pref = x.evaluate()\n return pref", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_thread_priority(self)", "def __evaluate(self, preds, labels, raw, fair, sort_by_unfairness=False,\n graph_prms=None):\n # Compute the distance from fair, then divide by fair to\n # compute the relative unfairness.\n diffs = (raw - fair) / fair\n if sort_by_unfairness:\n # Sort based on unfairness.\n diffs, indices = torch.sort(diffs)\n preds = preds[indices]\n labels = labels[indices]\n # Bucketize and compute bucket accuracies.\n num_samples = preds.size()[0]\n num_buckets = min(20 * (1 if sort_by_unfairness else 4), num_samples)\n num_per_bucket = math.floor(num_samples / num_buckets)\n assert num_per_bucket > 0, \\\n (\"There must be at least one sample per bucket, but there are \"\n f\"{num_samples} samples and only {num_buckets} buckets!\")\n # The resulting buckets are tuples of three values:\n # (x-axis value for bucket, number predicted correctly, total)\n buckets = [\n (x,\n self.check_output(preds_, labels_),\n preds_.size()[0])\n for x, preds_, labels_ in [\n # Each bucket is defined by a tuple of three values:\n # (x-axis value for bucket, predictions, ground truth labels).\n # The x-axis is the mean relative difference for this\n # bucket. A few values at the end may be discarded.\n (torch.mean(diffs[i:i + num_per_bucket]),\n preds[i:i + num_per_bucket],\n labels[i:i + num_per_bucket])\n for i in range(0, num_samples, num_per_bucket)]]\n if self.graph:\n assert graph_prms is not None, \\\n \"\\\"graph_prms\\\" must be a dict(), not None.\"\n assert \"flp\" in graph_prms, \"\\\"flp\\\" not in \\\"graph_prms\\\"!\"\n assert \"x_lim\" in graph_prms, \"\\\"x_lim\\\" not in \\\"graph_prms\\\"!\"\n # Plot each bucket's accuracy.\n pyplot.plot(\n ([x for x, _, _ in buckets]\n if sort_by_unfairness else list(range(len(buckets)))),\n [c / t for _, c, t in buckets], \"bo-\")\n pyplot.ylim((-0.1, 1.1))\n x_lim = graph_prms[\"x_lim\"]\n if x_lim is not None:\n pyplot.xlim(x_lim)\n pyplot.xlabel(\n \"Unfairness (fraction of fair)\"\n if sort_by_unfairness else \"Time\")\n pyplot.ylabel(\"Classification accuracy\")\n pyplot.tight_layout()\n pyplot.savefig(graph_prms[\"flp\"])\n pyplot.close()\n # Compute the overall accuracy.\n _, corrects, totals = zip(*buckets)\n acc = sum(corrects) / sum(totals)\n print(f\" Test accuracy: {acc * 100:.2f}%\")\n return acc", "def get_random_cpu_load():\n load = random.gauss(55, 10)\n if load < 0:\n return 0.0\n elif load > 100:\n return 100.0\n else:\n return round(load, 1)", "def best_value(self):\r\n return self._best_value", "def CountRandomLoadRate(self):\n\t\treturn self._get_attribute('countRandomLoadRate')", "def binary_fairness(\n preds: torch.Tensor,\n target: torch.Tensor,\n groups: torch.Tensor,\n task: Literal[\"demographic_parity\", \"equal_opportunity\", \"all\"] = \"all\",\n threshold: float = 0.5,\n ignore_index: Optional[int] = None,\n validate_args: bool = True,\n) -> Dict[str, torch.Tensor]:\n if task not in [\"demographic_parity\", \"equal_opportunity\", \"all\"]:\n raise ValueError(\n f\"Expected argument `task` to either be ``demographic_parity``,\"\n f\"``equal_opportunity`` or ``all`` but got {task}.\"\n )\n\n if task == 
\"demographic_parity\":\n if target is not None:\n rank_zero_warn(\"The task demographic_parity does not require a target.\", UserWarning)\n target = torch.zeros(preds.shape)\n\n num_groups = torch.unique(groups).shape[0]\n group_stats = _binary_groups_stat_scores(preds, target, groups, num_groups, threshold, ignore_index, validate_args)\n\n transformed_group_stats = _groups_stat_transform(group_stats)\n\n if task == \"demographic_parity\":\n return _compute_binary_demographic_parity(**transformed_group_stats)\n\n if task == \"equal_opportunity\":\n return _compute_binary_equal_opportunity(**transformed_group_stats)\n\n if task == \"all\":\n return {\n **_compute_binary_demographic_parity(**transformed_group_stats),\n **_compute_binary_equal_opportunity(**transformed_group_stats),\n }\n return None", "def random_importance_function():\n return random()", "def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)", "def FastConvergenceThreshold(self):\n\t\treturn self._get_attribute('fastConvergenceThreshold')", "def average_waiting(self):\n return self._average_waiting", "def cost(self):\n lg = len(self.guarantees.cnf)\n la = len(self.assumptions.cnf)\n\n \"\"\"heuristic\n Low: guarantees while assuming little (assumption set is bigger)\n High: guarantees while assuming a lot (assumption set is smaller)\"\"\"\n\n return la / lg", "def _fair_sharing_mp(n=2,decimals=100,pratio=15,debug=False):\n import operator\n import itertools\n from mpmath import mp\n mp.dps=decimals\n p=mp.mpf(10)**-(decimals/pratio)\n acc=mp.mpf(0)\n wins=[mp.mpf(0)]*n\n if debug:\n print('p = '+str(p))\n for i in itertools.count():\n index,_ = min(enumerate(wins),key=operator.itemgetter(1))\n yield index\n inc=p*(1-acc)\n wins[index]+=inc\n acc+=inc\n if debug:\n print('acc = '+str(acc))", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def get_state_score(battle_queue: 'BattleQueue') -> int:\n\n first_player = battle_queue.peek().get_name()\n\n j = battle_queue.copy()\n return max(producer(j, first_player))", "def weighted_metrics(self):\n return None", "def is_best(self, metric: float) -> bool:", "def get_score(self) -> int:\n return self.rstate.score()", "def score(self) -> FAIRResultCommonScore:\n return self._score", "def calculateRSTFairshare(self, reqRules):\n self.getQoSTotals()\n reqRate, _ = self.convertToRate(reqRules['rules'])\n totalRate = None\n for key in ['src_ipv4', 'src_ipv6']:\n totalRate = 
self.getTotalAvailableForIP(reqRules[key])\n if totalRate:\n break\n if not totalRate:\n return None\n reserved = int(self.params[reqRules['master_intf']]['intf_reserve'])\n totalAll = self.params[reqRules['master_intf']]['total_allocated']\n intfMax = self.params[reqRules['master_intf']]['intf_max']\n # Formula 1: Find ratio between all servers total capacity\n # (individualRate/totalRates) = ratio\n ratio = round(float(intfMax) / float(totalRate), 4)\n # Formula 2: Find individual node fairshare\n # ratio * reqRate = nodeThrgShare\n nodeThrgShare = int(ratio * reqRate)\n # Condition 1: If not enough capacity on all Nodes,\n # do a fractional calculation for fairshare\n if nodeThrgShare >= intfMax or nodeThrgShare > totalAll:\n fractReq = self.reqRatio(intfMax, reqRate, totalAll)\n reqRate = fractReq\n # Min for QoS we use 1Gb/s.\n reqRate = max(int(nodeThrgShare), 1000)\n return {'reqRate': reqRate, 'reserved': reserved}", "def job_priority_key(self, job):\n if not self._stats.total_usage:\n fairshare = 1\n else:\n user = job.user\n effective = user.cpu_clock_used / self._stats.total_usage\n #shares_norm = user.shares # already normalized\n fairshare = 2.0 ** -(effective / user.shares)\n prio = int(fairshare * 100000) # higher value -> higher priority\n # TODO if needed change the constant to a configuration setting\n # TODO and add more components to the priority value\n return (-prio, job.submit, job.ID)", "def find_best(self):\n best_st = 0\n best_bt = 0\n best_perf = -1.1\n for bt in self.btl:\n for st in self.stl:\n if self.total[bt, st, \"perf\"] > best_perf:\n best_perf = self.total[bt, st, \"perf\"]\n best_st = st\n best_bt = bt\n return (best_perf, self.total[best_bt, best_st, \"count\"], best_bt, best_st)", "def MinRandomLoadRate(self):\n\t\treturn self._get_attribute('minRandomLoadRate')", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS", "def tickPerf(self):\n return self._tickPerf", "def drift_score(self):\n if self.measured_val is None:\n return 0.0\n\n if self.rebalance_type == self.REBALANCE_TYPE_ABSOLUTE:\n return (self.measured_val - self.configured_val) / self.rebalance_thr\n else:\n return ((self.measured_val - self.configured_val) / self.configured_val) / self.rebalance_thr", "def get_score(self):\n return self.score", "def calculate_weighted_results():\n pass", "def get_score(self):\n return self.__score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size", "def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_fair:\n # We are much higher than fair.\n cls = 2\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 1\n elif tsh_fair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls", "def policy_value_fn(board):\n # return uniform probabilities and 0 score for pure MCTS\n action_probs = np.ones(len(board.availables))/len(board.availables)\n return zip(board.availables, action_probs), 0", "def getBestCluster():\r\n global bestCluster\r\n return bestCluster", "def utility(state):\n return state.getScore()", "def 
purity_test(self):\n mean = filter_data(self.data,self.ancestors)['Class'].mean()\n if mean == 0:\n return 0\n elif mean == 1:\n return 1\n return None", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n # Threshold between unfair and very unfair.\n tsh_unfair = 0.4\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_unfair:\n # We are much higher than fair.\n cls = 4\n elif -1 * tsh_unfair <= dif < -1 * tsh_fair:\n # We are not that much higher than fair.\n cls = 3\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 2\n elif tsh_fair < dif <= tsh_unfair:\n # We are not that much lower than fair.\n cls = 1\n elif tsh_unfair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls", "def score(self):\n return None", "def get_clusterable_weights(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def thread_priority(self):\n return _spacegrant_swig.general_burster_2_sptr_thread_priority(self)", "def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()", "def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()", "def cpu_time(self):", "def test_get_scorable_straight():\n roll = np.array([1, 2, 3, 4, 5, 6])\n expected = {\n \"one\": 1,\n \"five\": 1,\n \"three-ones\": False,\n \"three-twos\": False,\n \"three-threes\": False,\n \"three-fours\": False,\n \"three-fives\": False,\n \"three-sixes\": False,\n \"four-of-a-kind\": False,\n \"three-and-one\": False,\n \"five-of-a-kind\": False,\n \"six-of-a-kind\": False,\n \"straight\": True,\n \"three-pairs\": False,\n \"four-and-pair\": False,\n \"triplets\": False,\n }\n actual = analyze_roll.get_scorable(roll)\n assert expected == actual", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def purity(clusters, classes):\n\n d = defaultdict(list)\n\n # Get a list of class numbers of all examples in a cluster.\n for k, v in zip(clusters, classes):\n d[k].append(v)\n\n mayority = 0\n\n # Count the mayority class number and add it up over all clusters.\n for k in d:\n mayority += Counter(d[k]).most_common(1)[0][1]\n\n return float(mayority) / len(clusters)", "def throughput(self) -> Optional[int]:\n return pulumi.get(self, \"throughput\")", "def throughput(self) -> Optional[int]:\n return pulumi.get(self, \"throughput\")", "def get_score(self):\r\n return self.lcp.get_score()", "def get_expected_cost(self):", "def teach_sensitivity(self):\r\n return self._arm.teach_sensitivity", "def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def sample_rate(self):\n return self._sample_rate", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def cpu():\n sin = psutil.cpu_percent()\n return round(sin / 100, 3)", "def calculate_score_node(self, hypervisor):\n resource_id = \"%s_%s\" % (hypervisor.uuid, hypervisor.hostname)\n host_avg_cpu_util = self.ceilometer. 
\\\n statistic_aggregation(resource_id=resource_id,\n meter_name=self.HOST_CPU_USAGE_METRIC_NAME,\n period=\"7200\",\n aggregate='avg')\n\n if host_avg_cpu_util is None:\n LOG.error(\n _LE(\"No values returned by %(resource_id)s \"\n \"for %(metric_name)s\"),\n resource_id=resource_id,\n metric_name=self.HOST_CPU_USAGE_METRIC_NAME,\n )\n host_avg_cpu_util = 100\n\n cpu_capacity = self.model.get_resource_from_id(\n resource.ResourceType.cpu_cores).get_capacity(hypervisor)\n\n total_cores_used = cpu_capacity * (host_avg_cpu_util / 100)\n\n return self.calculate_weight(hypervisor, total_cores_used, 0, 0)", "def greedy_policy(self):\n return defaultdict(lambda: 0)", "def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)", "def get_score(cfg):\n key = (cfg.mut, cfg.pH)\n return lazy_load(SCORE_MAP, key, read_score, get_score_path, cfg)", "def _do_get_rate(self):\n rate = {\n 1: \"1 : Helium Probe in FAST rate\",\n 0: \"0 : Helium Probe in SLOW rate\"\n }\n result = self._execute('X')\n return rate.get(int(format(int(result[5:7]), '08b')[6]), \"Unknown\")", "def _sample_load(proc):\n return 0.01 * _for_process_and_descendants(\n psutil.Process.get_cpu_percent,\n proc,\n )", "def isFairForTeam(teamNumber):\r\n debug.write(\"Testing if experience gain is fair for player teams\", 2)\r\n if not 1 < int(teamNumber) < 4:\r\n debug.write(\"Incorrect teamnumber passed, teams are not fair\", 2)\r\n return False \r\n return bool( es.getlivingplayercount( 5 - int( teamNumber )))", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def get_validation_performance(self) -> float:\n return self.best_performance", "def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK", "def compute_consistency_score(stats, col_name):\n col_stats = stats[col_name]\n if 'duplicates_score' in col_stats:\n consistency_score = (col_stats['data_type_distribution_score'] + col_stats['empty_cells_score'])/2.5 + col_stats['duplicates_score']/5\n else:\n consistency_score = (col_stats['data_type_distribution_score'] + col_stats['empty_cells_score'])/2\n return {'consistency_score': consistency_score\n ,'consistency_score_description':\"\"\"\n A high value for this score indicates that the data in a column is not very consistent, it's either missing a lot of valus or the type of values it has varries quite a lot (e.g. combination of strings, dates, integers and floats).\n The data consistency score is mainly based upon the Data Type Distribution Score and the Empty Cells Score, the Duplicates Score is also taken into account if present but with a smaller (2x smaller) bias.\n \"\"\"}", "def compute_statistics(self):" ]
[ "0.67016745", "0.5999203", "0.59884006", "0.58963954", "0.56670684", "0.5645477", "0.54838866", "0.5344908", "0.53396153", "0.5330606", "0.53292984", "0.5284739", "0.52752274", "0.5267313", "0.52610236", "0.5256685", "0.52457917", "0.52381456", "0.5232188", "0.52058727", "0.5198721", "0.5190431", "0.5178682", "0.517371", "0.5166543", "0.516368", "0.514149", "0.5099078", "0.5088327", "0.5085441", "0.50820494", "0.50760615", "0.50752306", "0.5064788", "0.5057971", "0.5057971", "0.5057971", "0.5057971", "0.5057971", "0.5057971", "0.5057971", "0.50518316", "0.5039877", "0.50345504", "0.5023253", "0.49993956", "0.4997206", "0.49930412", "0.4991662", "0.49909654", "0.49895397", "0.49739942", "0.49673203", "0.4967099", "0.49551737", "0.49544373", "0.49450794", "0.49450794", "0.49450794", "0.49413994", "0.49353296", "0.49342972", "0.49244305", "0.49196982", "0.49130335", "0.49126056", "0.49126056", "0.49126056", "0.4906558", "0.49017525", "0.4895652", "0.48862305", "0.4873593", "0.48708823", "0.48708823", "0.48689604", "0.4865956", "0.48650384", "0.48603737", "0.48595163", "0.48595163", "0.4856451", "0.4856183", "0.48545286", "0.48521486", "0.48486575", "0.48471516", "0.48404327", "0.48404035", "0.48363903", "0.483522", "0.4830384", "0.48284987", "0.4818715", "0.4814509", "0.4813103", "0.48061332", "0.48036546", "0.48015806", "0.48009583" ]
0.68944013
0
Gets the fairness threshold
def _get_fairness_threshold(self):
    if self.fairness_threshold == "auto":
        if self._get_ml_task() in [
            BINARY_CLASSIFICATION,
            MULTICLASS_CLASSIFICATION,
        ]:
            thresholds = {
                "demographic_parity_difference": 0.1,
                "demographic_parity_ratio": 0.8,
                "equalized_odds_difference": 0.1,
                "equalized_odds_ratio": 0.8,
            }
            return thresholds.get(self._fairness_metric, 0.8)
        elif self._get_ml_task() == REGRESSION:
            thresholds = {
                "group_loss_ratio": 0.8,
            }
            if self._fairness_metric == "group_loss_difference":
                raise AutoMLException(
                    "We can't set default fairness threshold value. Please set `fairness_threshold` value in AutoML constructor."
                )
            return thresholds.get(self._fairness_metric, 0.8)
    else:
        return deepcopy(self.fairness_threshold)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FastConvergenceThreshold(self):\n\t\treturn self._get_attribute('fastConvergenceThreshold')", "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def reward_threshold(self) -> Optional[float]:", "def get_performance_threshold(self):\n\n if Test.performance_params: return float(Test.performance_params[0])\n elif self._check_performance: return self._performance_threshold\n else: return None", "def _get_fairness_metric(self):\n self._validate_fairness_metric()\n if self.fairness_metric == \"auto\":\n if self._get_ml_task() == BINARY_CLASSIFICATION:\n return \"demographic_parity_ratio\"\n if self._get_ml_task() == REGRESSION:\n return \"group_loss_ratio\"\n if self._get_ml_task() == MULTICLASS_CLASSIFICATION:\n return \"demographic_parity_ratio\"\n else:\n return deepcopy(self.fairness_metric)", "def getThreshold(self): # real signature unknown; restored from __doc__\n pass", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def f(ns, k):\n ns = list(enumerate(sorted(ns)))\n n = len(ns)\n min_u = unfair(ns[:k])\n nns = ns[k:]\n def g(mu, n):\n (i, n) = n\n unfairness = n - ns[i - k + 1][1]\n # print(i, n, unfairness)\n if unfairness < mu:\n return unfairness\n return mu\n return reduce(g, nns, min_u)", "def threshold(self):\n return self._threshold", "def threshold(self):\n return self._threshold", "def thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_thread_priority(self)", "def thresholdfactor(self):\n return self.__thresholdfactor", "def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = -np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result", "def is_fair(self):\n fairness = Fairness(experience_weight=1)\n if fairness.is_fair(self):\n return 'This trade is fair!'\n return 'This trade is unfair!'", "def get_wait_time(*args, threshold: float = 0.9, rate_limit_header: str = \"X-Shopify-Shop-Api-Call-Limit\"):\n # average load based on threshold\n mid_load = threshold / 2\n # find the requests.Response inside args list\n for arg in args:\n response = arg if isinstance(arg, requests.models.Response) else None\n # Get the rate_limits from response\n rate_limits = response.headers.get(rate_limit_header) if response else None\n # define current load from rate_limits\n if rate_limits:\n current_rate, max_rate_limit = rate_limits.split(\"/\")\n load = int(current_rate) / int(max_rate_limit)\n else:\n load = None\n # define wait_time based 
on load conditions\n if not load:\n # when there is no rate_limits from header, use the `sleep_on_unknown_load`\n wait_time = ShopifyRateLimiter.on_unknown_load\n elif load >= threshold:\n wait_time = ShopifyRateLimiter.on_high_load\n elif load >= mid_load:\n wait_time = ShopifyRateLimiter.on_mid_load\n elif load < mid_load:\n wait_time = ShopifyRateLimiter.on_low_load\n return wait_time", "def test_soft_threshold():\n assert snet.soft_threshold(10, 100) == 0\n assert snet.soft_threshold(-10, 100) == 0\n assert snet.soft_threshold(10, 3) == 7\n assert snet.soft_threshold(-10, 3) == -7", "def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)", "def actualthreshold(self):\n return self._actualthreshold", "def max_staleness(self) -> str:\n return pulumi.get(self, \"max_staleness\")", "def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def get_threshold(self):\n confs = self.confidence[\"conf\"]\n\n return compute_minimum_kernel_density(confs)", "def get_threshold(self, cat):\n t = self.con.execute('select threshold from ct where category=\"%s\"'\n %(cat)).fetchone()\n \n if t is None:\n return 1.0\n \n return self.thresholds[cat]", "def active_thread_priority(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_active_thread_priority(self)", "def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 
+ 14 + 16", "def _find_threshold(self, feature, y_train, num_class):\n assert len(num_class) == 2, \"This function only assumes work with binary classification.\"\n best_threshold = 0.0\n max_exact_classification = 0.0\n is_positive_negative = False\n sorted_feature = sorted(np.unique(feature))\n for i in range(len(sorted_feature)-1):\n # assume the value less than threshold is negative (0), greater than threshold is positive (1)\n threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2\n left_partition = y_train[feature < threshold]\n right_partition = y_train[feature > threshold]\n negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))\n / len(feature))\n # assume the value less than threshold is positive (1), greater than threshold is negative. (0)\n positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))\n / len(feature))\n # make decision here\n is_positive_negative = positive_negative > negative_positive\n choose = positive_negative if is_positive_negative else negative_positive\n if max_exact_classification < choose:\n max_exact_classification = choose\n best_threshold = threshold\n return best_threshold, is_positive_negative", "def thread_priority(self):\n return _spacegrant_swig.general_burster_2_sptr_thread_priority(self)", "def _determine_threshold(threshold, clip_min=0.1, clip_max=0.9):\n if threshold != -1:\n return threshold\n\n path = os.path.join(os.path.dirname(cfg.predictions_path), 'thresholds.p')\n\n if not os.path.isfile(path):\n print('Warning: Defaulting to threshold of 0.5')\n return 0.5\n\n with open(path, 'rb') as f:\n thresholds = pickle.load(f)\n return np.clip(thresholds, clip_min, clip_max)", "def thread_priority(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_thread_priority(self)", "def find_optimal_threshold(self, hist):\n k = 256\n threshold = int(k / 2)\n lastexpected1 = lastexpected2 = 0\n\n while True:\n expected1 = expected2 = 0\n t_exp1 = sum(hist[:threshold])\n t_exp2 = sum(hist[threshold:])\n for i in range(threshold):\n expected1 += (hist[i] / t_exp1) * i\n\n for i in range(threshold, k):\n expected2 += (hist[i] / t_exp2) * i\n\n threshold = (expected1 + expected2) / 2\n if abs(expected1 - lastexpected1) != 0 and abs(expected2 - lastexpected2) != 0:\n break\n lastexpected1 = expected1\n lastexpected2 = expected2\n # print(expected2, expected1)\n return threshold", "def threshold_selection(prevalence, CostFP_minus_CostTN, CostFN_minus_CostTP, y, y_hat):\n fpr, tpr, thresholds = roc_curve(y, y_hat)\n m = ((1 - prevalence) / prevalence) * ((CostFP_minus_CostTN) / (CostFN_minus_CostTP))\n fm_thresholds = []\n for i in range(len(fpr)):\n fm = tpr[i] - (m * fpr[i])\n fm_thresholds.append((thresholds[i], fm))\n fm_thresholds = sorted(fm_thresholds, key=lambda fm_value: fm_value[1], reverse=True)\n return fm_thresholds[0][0]", "def get_throttle_factor(self): # pragma: no cover\n raise NotImplementedError()", "def sampling_priority(self):\n # type: () -> Optional[NumericType]\n return self._metrics.get(SAMPLING_PRIORITY_KEY)", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def MinRandomLoadRate(self):\n\t\treturn self._get_attribute('minRandomLoadRate')", "def fair_use_delay(self):\n if foo._error_count > 
1:\n delay = foo(120, 30 * (2 ** (foo._error_count - 2)))\n else:\n delay = foo._next_call_timestamp - foo(foo.time())\n if delay > 0 and foo.respect_fair_use_policy:\n foo.info('Sleeping for %s seconds' % delay)\n foo.sleep(delay)", "def calculate_request_threshold(self, requests_per_second):\n request_threshold = 1.0 / float(requests_per_second)\n return request_threshold", "def determine_threshold(yval,pval):\n\n F1 = 0\n epsilon = 0\n for _epsilon in np.linspace(min(pval),max(pval),1000):\n ## Compute stats\n _F1,stats = evaluate_epsilon(yval,pval,_epsilon)\n\n if _F1 > F1:\n F1 = _F1\n epsilon = _epsilon\n print(\"Better threshold found! {} ==> F1 {}\".format(epsilon,F1))\n \n return epsilon, F1", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def get_random_cpu_load():\n load = random.gauss(55, 10)\n if load < 0:\n return 0.0\n elif load > 100:\n return 100.0\n else:\n return round(load, 1)", "def getFrequencyThreshold(self):\n return self.getOrDefault(self.frequencyThreshold)", "def _compute_sampling_threshold(global_step, k):\n return k / (k + math.exp(global_step / k))", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def test_samples_close_to_inclusion_probability_priority(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n for i in range(n):\n s.process(i, 1.0)\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)", "def random_prob(prob_thresh: float):\n seed = time.time()\n random.seed(seed)\n return prob_thresh > random.uniform(0, 1)", "def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n # Threshold between unfair and very unfair.\n tsh_unfair = 0.4\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_unfair:\n # We are much higher than fair.\n cls = 4\n elif -1 * tsh_unfair <= dif < -1 * tsh_fair:\n # We are not that much higher than fair.\n cls = 3\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 2\n elif tsh_fair < dif <= tsh_unfair:\n # We are not that much lower than fair.\n cls = 1\n elif tsh_unfair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls", "def success_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"success_threshold\")", "def success_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"success_threshold\")", "def getPriority(self, ad_count, has_winners):\n if ad_count == 0:\n return 5000\n\n if ad_count == 1:\n return 2000\n\n if has_winners:\n return 1500\n\n if ad_count == 2:\n return 1000\n\n return 0", "def _compute_thresholds(self, thresholds):\r\n thr = thresholds\r\n limit = int(1 / thresholds)\r\n thresholds = [x * thr for x in range(limit)]\r\n 
thresholds.append(1)\r\n return thresholds", "def weight() -> int:\n return floor(stakedTokens / MINIMUM_STAKE)", "def get_most_probable_bit(filtered_capture):\n for prob_threshold in (.9, .8, .7, .6):\n result = get_most_probable_bit_with_thres(filtered_capture, prob_threshold)\n if result is not None:\n return result\n return None", "def fairness_discrepancy(data, n_classes, norm=0):\n unique, freq = np.unique(data, return_counts=True)\n props = freq / len(data) #Proportion of data that belongs to that data\n \n #------------------Modification to correct the zero support problem------------------------------------------------\n temp=np.zeros(n_classes)\n temp[unique]=props\n props=temp\n #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n l2_fair_d = np.sqrt(((props - truth)**2).sum())/n_classes\n l1_fair_d = abs(props - truth).sum()/n_classes\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes)/n_classes \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n #Create array to populate proportions\n # props2=np.zeros(n_classes)\n # props2[unique]=props\n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n for i in props:\n f.write(\"%f \"%(i))\n f.write(\"\\n\")\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity,wd/metric_max(n_classes,\"wd\"),wds/metric_max(n_classes,\"wds\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def _toss_fair_coin() -> bool:\n return random.random() > 0.5", "def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_fair:\n # We are much higher than fair.\n cls = 2\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 1\n elif tsh_fair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls", "def teach_sensitivity(self):\r\n return self._arm.teach_sensitivity", "def _performance(Classifier, features, labels, threshold):\n correct = 0\n for index, vector in enumerate(features):\n result = _minimal_predict(Classifier, vector, threshold)\n if result == \"Positive\" and labels[index] == 1.0 or result == \"Negative\" and \\\n labels[index] == 0.0 or result == \"Neutral\":\n correct += 1\n Classifier.performance = correct / len(labels) * 100\n return Classifier.performance", "def find_optimal_threshold(self, hist):\n\n # print(\"number of pixels using sum: \", sum(hist))\n probability = 
np.array((1/sum(hist))*hist)\n expected_value = probability*np.array(range(256))\n # print(\"probability: \\n\", probability)\n # print(\"expected_value: \\n\", expected_value)\n\n threshold = len(hist)/2\n temp_threshold = 0\n\n while abs(threshold - temp_threshold) > 0.001:\n temp1 = []\n temp2 = []\n print(\"New threshold: \", threshold)\n for i in range(len(hist)):\n if i < threshold:\n temp1.append(expected_value[i])\n else:\n temp2.append(expected_value[i])\n mean1 = sum(temp1)\n print(\"mean1: \\n\", mean1)\n mean2 = sum(temp2)\n print(\"mean2: \\n\", mean2)\n temp_threshold = threshold\n threshold = (mean1+mean2)/2\n print(\"threshold: \", threshold)\n print(\"temp_threshold: \", temp_threshold)\n\n return threshold", "def average_waiting(self):\n return self._average_waiting", "def get_success_probability(self):\n\t\treturn min(self.get_raw_probability(), RunOrder.MAX_PERCENTS)", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def check_cpu_constrained():\n return psutil.cpu_percent(1) > 75", "def calculate_lower_boundary(self, divisor):\n\n # see how low you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n lowest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None or population is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = lowest_divisor - estimator\n else:\n lowest_divisor = divisor\n divisor = prev_divisor - estimator\n if lowest_divisor == divisor:\n break\n counter += 1\n return math.ceil(lowest_divisor * 1000) / 1000", "def decision_threshold(x, y):\n \n model = DecisionTreeClassifier(max_depth=1, criterion='entropy')\n model.fit(x,y)\n print (\"-- Uncertainty Threshold: \", model.tree_.threshold[0])\n return model.tree_.threshold[0]", "def thread_priority(self):\n return _spacegrant_swig.hdlc_deframer_sptr_thread_priority(self)", "def thread_priority(self):\n return _spacegrant_swig.invert_bit_sptr_thread_priority(self)", "def worst_score(self):\r\n pass", "def calc_fair_profit(self, assignment):\n fair_profit = {t:0 for t in self.tasks}\n for agent, tasks in assignment.items():\n for task in tasks:\n fair_profit[task] += self.profit(agent, task)\n return min(fair_profit.values())", "def thread_priority(self):\n return _TestA_swig.cleanslate_sptr_thread_priority(self)", "def _estimate_threshold(self, **kwargs):\n recompute_threshold = kwargs.pop('recompute_threshold', False)\n # if threshold is in table, then use it.\n current_setting = (self.beta, self.chi2dist.kwds['df'])\n threshold = None\n res = False\n if not recompute_threshold and current_setting in _gaussian_cusum_thresholds.keys():\n for e in _gaussian_cusum_thresholds[current_setting]:\n if e[0] == self.arl:\n threshold = np.array([e[1]])\n res = True\n # if threshold not is in table, estimate it\n if threshold is None:\n len_simulation = kwargs.pop('len_simulation', None)\n if len_simulation is None:\n len_simulation = 10 * self.arl\n self.log.info(\"estimating threshold...\")\n plain_cusum = Cusum(arl=self.arl, beta=self.beta)\n plain_cusum.gamma = self.gamma\n d2_training = self.chi2dist.rvs(size=(int(len_simulation), 1))\n kwargs.pop('x', None)\n res = 
plain_cusum._estimate_threshold(x=d2_training, dof=self.chi2dist.kwds['df'],\n **kwargs)\n threshold = plain_cusum.threshold\n self.threshold = threshold\n return res", "def thread_priority(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_thread_priority(self)", "def penalty(self):\n return 0", "def soft_thresh(x: float, lmb: float) -> float:\n if x < lmb:\n return x + lmb\n elif x > lmb:\n return x - lmb\n else:\n return 0.0", "def compute_bayesian_threshold(points, nominal_point, confidence_level):\n distances = [np.linalg.norm(p - nominal_point, ord = 1) for p in points]\n confidence_rank = min(math.ceil(len(points) * confidence_level),len(points)-1)\n #print(confidence_level, confidence_rank)\n threshold = np.partition(distances, confidence_rank)[confidence_rank]\n return threshold", "def Get_FifoThreshold_Value(self):\r\n return self.__readFromRegister(self.__REG_RW_FIFO_CTRL_REG, self.__MASK_FIFO_CTRL_REG_WTM)", "def thread_priority(self):\n return _spacegrant_swig.binary_sink_sptr_thread_priority(self)", "def get_performance(self):\n if self.skip_reference:\n return self.compare_sim.tps\n\n # Avoid divide by zero errors when the simulation is not executed.\n if self.reference_sim.tps == 0:\n return 0\n\n t0 = 1 / self.reference_sim.tps\n t1 = 1 / self.compare_sim.tps\n return 1 / (t1 - t0)", "def CountRandomLoadRate(self):\n\t\treturn self._get_attribute('countRandomLoadRate')", "def best_t(precisions, recalls, thresholds):\n f1 = [2 * (precisions[i] * recalls[i]) / (precisions[i] + recalls[i]) for i in range(0, len(thresholds))]\n return thresholds[np.argmax(f1)]", "def calc_top_k(population, agent2score, p=0.2):\n running_total = 0\n ordered_costs = [ agent2score[agent] for agent in population ]\n limit = p * sum(map(abs, ordered_costs))\n for top_cut, agent in enumerate(population, start=1):\n running_total += agent2score[agent]\n if running_total > limit:\n return top_cut", "def _thresholding(qc_value, thresholds=None):\n MAX_BOUND, MIN_BOUND = (1, 0)\n if not thresholds:\n thresholds = TaskQC.criteria['default'].copy()\n if qc_value is None or np.isnan(qc_value):\n return int(-1)\n elif (qc_value > MAX_BOUND) or (qc_value < MIN_BOUND):\n raise ValueError(\"Values out of bound\")\n if 'PASS' in thresholds.keys() and qc_value >= thresholds['PASS']:\n return 0\n if 'WARNING' in thresholds.keys() and qc_value >= thresholds['WARNING']:\n return 1\n if 'FAIL' in thresholds and qc_value >= thresholds['FAIL']:\n return 2\n if 'NOT_SET' in thresholds and qc_value >= thresholds['NOT_SET']:\n return -1\n # if None of this applies, return 'NOT_SET'\n return -1", "def throughput(self) -> Optional[int]:\n return pulumi.get(self, \"throughput\")", "def throughput(self) -> Optional[int]:\n return pulumi.get(self, \"throughput\")", "def find_optimal_threshold(self, hist):\n\n\n threshold = int((len(hist)-1)/2)\n ct = len(hist) - 1\n\n while True:\n if(ct < 1):\n break\n threshold1 = self.evalue(hist,0,threshold)\n threshold2 = self.evalue(hist,threshold,len(hist) - 2)\n nt = int((threshold1+threshold2)/2)\n ct = nt - threshold\n threshold = nt\n\n return threshold", "def _find_significance_threshold(num_points, confidence_level):\n\n min_absolute_t_value = t_distribution.ppf(\n q=(1. 
- confidence_level) / 2, df=num_points - 2, loc=0., scale=1.)\n\n # return numpy.power(\n # float(num_points - 2) / min_absolute_t_value ** 2 + 1, -0.5)\n\n return numpy.sqrt(\n min_absolute_t_value ** 2 /\n (min_absolute_t_value ** 2 + num_points - 2)\n )", "def _validate_threshold(self, proposal):\n threshold = proposal[\"value\"]\n if threshold <= 0:\n raise traitlets.TraitError(\"threshold must be greater than 0.\")\n return threshold", "def find_TPR_threshold(y, scores, desired_TPR):\n for threshold in np.arange(1,0,-0.01):\n y_hat = (scores>=threshold).astype(int)\n confusion = confusion_matrix(y, y_hat)\n TN, FP, FN, TP = confusion.flat\n TPR = TP / (TP + FN)\n FPR = FP / (FP + TN)\n if TPR >= desired_TPR:\n return threshold, FPR", "def cost(self):\n lg = len(self.guarantees.cnf)\n la = len(self.assumptions.cnf)\n\n \"\"\"heuristic\n Low: guarantees while assuming little (assumption set is bigger)\n High: guarantees while assuming a lot (assumption set is smaller)\"\"\"\n\n return la / lg", "def fpct(self):\n # 1 is probably the best number in most cases because the game is often CPU-bound.\n # the following number could be chosen instead someday\n tps = self.real_speed * 1000 / VIRTUAL_TIME_INTERVAL\n # Avoid unrealistic ping values.\n ping = min(self.max_ping, self.ping)\n result = int(tps * ping * config.fpct_coef) + 1\n return min(config.fpct_max, result)", "def block8_threshold(self):\n return self._safe_value(VAR_BLOCK8THRESHOLD, float)", "def test_samples_high_weight_elements_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertCountEqual([\"a\", \"b\"], s.elements.keys())", "def weightedrandomchoice(items): # {{{2\n total = 0\n items.sort(reverse=True, key=lambda x:x[0])\n for item in items:\n total += item[0]\n threshold = random.uniform(0, 0.6) * total\n for item in items:\n threshold -= item[0]\n if threshold <= 0:\n return item[1]", "def thread_priority(self):\n return _spacegrant_swig.hdlc_framer_sptr_thread_priority(self)", "def find_best_threshold(y, y_hat, step_size, score_func, maximize=True):\n best_thres, best_score = 0.0, 0.0 if maximize else 1.0\n for thres in np.arange(0, 1, step_size):\n score = score_for_threshold(y, y_hat, score_func, thres)\n if (maximize and (score > best_score)) or (not maximize and (score < best_score)):\n best_score = score\n best_thres = thres\n\n return best_thres, best_score", "def threshold(activation):\n if activation >= 0.0:\n return 1\n else:\n return 0" ]
[ "0.6620242", "0.62111634", "0.604016", "0.60012925", "0.59756714", "0.5927428", "0.591201", "0.5910259", "0.5881854", "0.5881854", "0.5880676", "0.58554536", "0.5805405", "0.578478", "0.5761599", "0.5743415", "0.5742673", "0.56985474", "0.5683226", "0.56465083", "0.5641896", "0.5629673", "0.56252086", "0.5603558", "0.5587334", "0.5580204", "0.55372804", "0.5537269", "0.55172735", "0.5509283", "0.55075145", "0.5507316", "0.5485224", "0.5485224", "0.5485224", "0.5471957", "0.54673594", "0.54660416", "0.5461312", "0.54543424", "0.54543424", "0.54543424", "0.54543424", "0.54543424", "0.54522467", "0.54516226", "0.5448357", "0.54305583", "0.5425978", "0.5413959", "0.54105675", "0.5404231", "0.5404231", "0.54034674", "0.5393326", "0.53914773", "0.53909814", "0.5375962", "0.5362165", "0.53549385", "0.53503114", "0.5347312", "0.5342914", "0.53409314", "0.5330454", "0.5322351", "0.5311715", "0.53083146", "0.5299008", "0.529583", "0.5294036", "0.5292117", "0.5289897", "0.52885103", "0.5283595", "0.52769446", "0.52714294", "0.52670544", "0.5263859", "0.52631", "0.5261848", "0.5260856", "0.5245312", "0.52451694", "0.5240016", "0.52380514", "0.5236986", "0.5236986", "0.523656", "0.523643", "0.52330786", "0.52297294", "0.5224349", "0.5220735", "0.521506", "0.5213522", "0.5209251", "0.52070355", "0.5205705", "0.51989263" ]
0.74255294
0
Gets privileged groups for fair training
def _get_privileged_groups(self):
    if self.privileged_groups == "auto":
        return []
    else:
        return deepcopy(self.privileged_groups)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_underprivileged_groups(self):\n if self.underprivileged_groups == \"auto\":\n return []\n else:\n return deepcopy(self.underprivileged_groups)", "def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)", "def test_aws_service_api_security_groups_get(self):\n pass", "def get_pingroups(self):\n return self.groups[:]", "def getGroups():\r\n return Group.getGroups()", "def get_groups_using_malware():\n global groups_using_malware\n\n if not groups_using_malware:\n groups_using_malware = rsh.groups_using_malware(get_srcs())\n \n return groups_using_malware", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def granted_groups(self):\n return [\n g\n for g in Group.objects.filter()\n if ManagedObject.objects.filter(GroupAccess.Q(g) & Q(id=self.id)).exists()\n ]", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def available_groups(cls):\n raise NotImplementedError", "def get_security(self):\n users = find_root(self)['users']\n userids_and_groups = []\n for userid in self._groups:\n if userid in users:\n userids_and_groups.append({'userid':userid, 'groups':self.get_groups(userid)})\n return userids_and_groups", "def GetGroupMembers(self, group):\n return []", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def get_relevant_perm_groups(self):\n\n groups = Group.objects.filter(Q(name=\"everyone\") | Q(name=self.admin_group_name()) | Q(name=self.participants_group_name()))\n return groups", "def groups(self):\n return self.get_data(\"groups\")", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def get_groups_using_technique():\n global groups_using_technique\n\n if not groups_using_technique:\n groups_using_technique = rsh.groups_using_technique(get_srcs())\n \n return groups_using_technique", "def get_groups(self, username):\n return []", "def list_secgroups(self, name=None):", "def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm", "def _identify_groups_for_user(user):\n groups = []\n for group in user.groups.all():\n if group.name == 'WMT16' \\\n or group.name.lower().startswith('wmt') \\\n or group.name.startswith('eng2') \\\n or group.name.endswith('2eng'):\n continue\n \n if not group in groups:\n groups.append(group)\n \n return groups", "def get_groups():\n\n # FUTURE: Properly reutrn error, Mongo is giving it's own\n if current_user.groups:\n return Response(response=json.dumps([g.to_dict() for g in current_user.groups]), status=200, mimetype=\"application/json\")\n else:\n return return_json_error('No groups assigned to', 500)", "def groups(self):\r\n return 
users.Groups(self)", "def groups(self):\n return []", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n\n return ('train', 'dev', 'eval')", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def get(self, filter=None, private=None):\r\n params = base.get_params(('filter', 'private'), locals())\r\n url = '{0}/group-privileges/{1}/'.format(self.parent.parent.get_url(),\r\n self.user)\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def get_groups(self):\n return Client._get(self)", "def test_users_groups_get(self):\n pass", "def test_get_device_groups(self):\n pass", "def test_get_groups_all(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.set_perms(['Perm1', 'Perm2'], object0)\n group0.set_perms(['Perm1', 'Perm3'], object1)\n group1.set_perms(['Perm2'], object1)\n \n # no perms\n self.assertFalse(group1 in get_groups_all(object0, ['Perm1']))\n \n # has perms, but not the right one\n self.assertFalse(group0 in get_groups_all(object0, ['Perm3']))\n \n # has one perm, but not all\n self.assertFalse(group0 in get_groups_all(object0, ['Perm1','Perm3']))\n self.assertFalse(group0 in get_groups_all(object1, ['Perm1','Perm2']))\n \n # has single perm\n self.assert_(group0 in get_groups_all(object0, ['Perm1']))\n self.assert_(group0 in get_groups_all(object0, ['Perm2']))\n self.assert_(group1 in get_groups_all(object1, ['Perm2']))\n \n # has multiple perms\n self.assert_(group0 in get_groups_all(object0, ['Perm1','Perm2']))\n self.assert_(group0 in get_groups_all(object1, ['Perm1','Perm3']))", "def getUserGroups(self, user):\n return [gu[0] for gu in grp.getgrall() if user in gu[3]]", "def list_groups(self):\n return self.get_admin(\"groups\")", "def test_group_is_private(self):\n group = mommy.make('groups.Group', private=True)\n thread = self.create_thread(group=group)\n result = Thread.public.by_group(group)\n self.assertNotIn(thread, result)", "def get_psample_list_groups(dut):\n return st.show(dut, \"sudo psample --list-groups\", skip_tmpl=True)", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def groups(self):\r\n return resources.Groups(self)", "def security_groups(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, 
\"security_groups\")", "def load_security_groups(self):\n url = self.lookup(\"security_groups_url\")\n groups = self._fetcher.get_entities(url)\n if groups is None:\n return\n\n group_names = [group['name']\n for group in groups if group['running_default'] is False]\n # at this point the group_names contain all the running groups in addition\n # to the groups assigned to this space.\n # That's why we need to remove the duplicates\n group_names = list(set(group_names))\n\n for name in group_names:\n self._security_groups.append({'name': name})", "def get_groups(self):\n return [self.primary_group] + list(self.secondary_groups)", "def get_group_permissions(self, obj=None):\n pass", "def private_groups(self, user=None):\n\n if user is None:\n return []\n return self._format(user.groups)", "def test_get_group(self):\n pass", "def test_api_v1_groups_get(self):\n pass", "def _accessible_courses_list_from_groups(request):\r\n courses_list = {}\r\n\r\n instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()\r\n staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()\r\n all_courses = instructor_courses | staff_courses\r\n\r\n for course_access in all_courses:\r\n course_key = course_access.course_id\r\n if course_key not in courses_list:\r\n course = modulestore('direct').get_course(course_key)\r\n if course is None:\r\n raise ItemNotFoundError(course_key)\r\n courses_list[course_key] = course\r\n\r\n return courses_list.values()", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def get_groups(self, username):\n groups = []\n for group in grp.getgrall():\n if username in group.gr_mem:\n groups.append(group.gr_name)\n\n return groups", "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist", "def get_all_groups(self):\n return self.groups + ['all']", "def get_permission_groups(self, username):\n\n if not self.check_prereqs():\n return []\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_group_membership_query,{'username_field':self.sql_username_field,'username':username,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_permission_groups: %s\" % (query,))\n cursor.execute(query)\n groups=[]\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n self.log.debug(\"sqlflexibleauthstore: get_permission_groups: retrieved groupname from the database\")\n dictrow=dict(zip(desc,row))\n groups.append(dictrow[self.sql_groupname_field])\n return groups", "def get_groups(self, group_name):\r\n assert group_name in self.groups.keys(), group_name\r\n try:\r\n group_list = self.groups[group_name]\r\n except KeyError:\r\n raise GroupKeyError()\r\n return group_list", "def get(self, *args):\n return _libsbml.ListOfGroups_get(self, *args)", 
"def test_get_resource_group_member_list(self):\n pass", "def getUserGroups(self, username):\r\n return self.getUser(username).groups", "def readGroups(self):\n\t\tgroups = self._fileSystem.readGroups()\n\t\tif groups is None:\n\t\t\treturn\n\t\treturn groups", "def _get_lsp_config_frr_admin_groups(self):\n return self.__lsp_config_frr_admin_groups", "def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]", "def get_groups(self):\n result = self.conn.usergroup.get(status=0, output='extend', selectUsers=\"extend\")\n groups = {group[\"name\"]: Group(\n name=group[\"name\"],\n id=group[\"usrgrpid\"],\n members=group[\"users\"],\n ) for group in result}\n return groups", "def user_groups(username, htgroup_fn, strict=True):\n groups = []\n for group_name, users in read_groups(htgroup_fn, strict=strict).items():\n if username in users:\n groups.append(group_name)\n return groups", "def getMemberships(self):\n\n extra_rights = {\n 'user': ['user'],\n 'public': ['anyone'],\n 'list': [],\n }\n\n return dicts.merge(extra_rights, self.rights)", "def list_secgroups(self, name=None):\n groups = self.cloudman.network.security_groups()\n\n # print (\"TTTTT\")\n # for g in groups:\n # pprint(g)\n\n if name is not None:\n for entry in groups:\n\n if entry['name'] == name:\n groups = [entry]\n break\n\n return self.get_list(\n groups,\n kind=\"secgroup\")", "def get_groups(self, env, token, memcache_client=None):\n groups = None\n key = '%s/token/%s' % (self.reseller_prefix, token)\n cached_auth_data = memcache_client and memcache_client.get(key)\n if cached_auth_data:\n start, expiration, groups = cached_auth_data\n if time() - start > expiration:\n groups = None\n\n headers = {}\n if env.get('HTTP_AUTHORIZATION'):\n groups = None\n headers[\"Authorization\"] = env.get('HTTP_AUTHORIZATION')\n\n if not groups:\n with Timeout(self.timeout):\n conn = http_connect(self.auth_host, self.auth_port, 'GET',\n '%stoken/%s' % (self.auth_prefix, token),\n headers, ssl=self.ssl)\n\n resp = conn.getresponse()\n resp.read()\n conn.close()\n if resp.status // 100 != 2:\n return None\n expiration = float(resp.getheader('x-auth-ttl'))\n groups = resp.getheader('x-auth-groups')\n if memcache_client:\n memcache_client.set(key, (time(), expiration, groups),\n timeout=expiration)\n\n if env.get('HTTP_AUTHORIZATION'):\n account, user, sign = \\\n env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')\n cfaccount = resp.getheader('x-auth-account-suffix')\n path = env['PATH_INFO']\n env['PATH_INFO'] = \\\n path.replace(\"%s:%s\" % (account, user), cfaccount, 1)\n\n return groups", "def give_group(self, key):\n return self._grps[key]", "def trainable_push_groups(self):\n return [group for group in self.push_groups if group.trainable]", "def listGroups(self):\n return tuple(Group.create(groupName, self._modelDataManager) for groupName in self.pm_getUserManager().listGroups())", "def get_group_names(self):\r\n return self.groups.keys()", "def can_assign(userid, group):", "def test_get_resource_group_by_moid(self):\n pass", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def test_get_resource_group_list(self):\n pass", "def _get_check_groups(self, group=None):\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n 
else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups", "def get_queryset(self):\n user = self.request.user\n return user.group_set.all()", "def iter_groups(self):\n\t\treturn iter(self._groups)", "def get_map_groups(user_map):\n pg = user_map.permission_group_user_map.all()\n gids = list(pg.values_list('group', flat=True))\n if len(gids) > 0:\n return Group.objects.filter(id__in=gids)\n\n return Group.objects.filter(name=DEFAULT_GROUP)", "def in_groups(self):\n perms = set(r.name for r in Group.query.join(Group.users).filter(User.id == self.id).all())\n return perms", "def users_groups():\n if request.method == \"GET\":\n query = {\"token\": ciconnect_api_token, \"globus_id\": session[\"primary_identity\"]}\n # Get user info to derive unix name\n user = get_user_info(session)\n unix_name = user[\"metadata\"][\"unix_name\"]\n # Get user's group membership info based on session unix name\n users_group_memberships = get_user_group_memberships(session, unix_name)\n\n multiplexJson = {}\n group_membership_status = {}\n for group in users_group_memberships:\n if group[\"state\"] not in [\"nonmember\"]:\n group_name = group[\"name\"]\n group_query = (\n \"/v1alpha1/groups/\" + group_name + \"?token=\" + query[\"token\"]\n )\n multiplexJson[group_query] = {\"method\": \"GET\"}\n group_membership_status[group_query] = group[\"state\"]\n # POST request for multiplex return\n multiplex = get_multiplex(multiplexJson)\n\n users_groups = []\n for group in multiplex:\n if (\n session[\"url_host\"][\"unix_name\"]\n in (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"])\n ) and (\n len(\n (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"]).split(\n \".\"\n )\n )\n > 1\n ):\n users_groups.append(\n (\n json.loads(multiplex[group][\"body\"]),\n group_membership_status[group],\n )\n )\n # users_groups = [group for group in users_groups if len(group['name'].split('.')) == 3]\n\n # Query user's pending project requests\n pending_project_requests = get_user_pending_project_requests(unix_name)\n # Check user's member status of root connect group\n connect_group = session[\"url_host\"][\"unix_name\"]\n user_status = get_user_connect_status(unix_name, connect_group)\n\n domain_name = domain_name_edgecase()\n\n with open(\n brand_dir\n + \"/\"\n + domain_name\n + \"/form_descriptions/group_unix_name_description.md\",\n \"r\",\n ) as file:\n group_unix_name_description = file.read()\n\n return render_template(\n \"users_groups.html\",\n groups=users_groups,\n project_requests=pending_project_requests,\n user_status=user_status,\n group_unix_name_description=group_unix_name_description,\n )", "def getListOfGroups(self, *args):\n return _libsbml.GroupsModelPlugin_getListOfGroups(self, *args)", "def get_group_access(self, group):\n return self._access_lists.get_group_access(group)", "def get_user_groups(user):\n auth_groups = user.groups.all()\n # groups = [group.profile for group in auth_group] # not working\n # todo implement better\n groups = [GroupProfile.objects.filter(group=group)[0] for group in auth_groups if GroupProfile.objects.filter(group=group).count()]\n return groups", "def test_get_groups(self):\n response = self.client.get_groups()\n uri, args = response[\"uri\"].split(\"?\")\n\n self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v1/groups\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def get_techniques_used_by_groups():\n global techniques_used_by_groups\n\n if not 
techniques_used_by_groups:\n techniques_used_by_groups = rsh.techniques_used_by_groups(get_srcs())\n\n return techniques_used_by_groups", "def test_IGroupCapability(self):\n self.assertFalse(self.ldap.allowGroupAdd(\"uid0\", \"group0\"))\n self.assertFalse(self.ldap.allowGroupRemove(\"uid0\", \"group0\"))", "def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]", "def test_get_resource_group_member_by_moid(self):\n pass", "def active_groups_anon(self):\n groups = self.active_groups()\n return [group for group in groups if group.anon_read]", "def get_private_group(invite_link):\n\n info = fetch_opengraph(invite_link)\n if not info.get('title'):\n return\n\n return {\n 'desc': info['description'],\n 'id': '',\n 'invite_link': invite_link,\n 'location': '',\n 'name': info['title'],\n 'type': '',\n 'username': '',\n }", "def groups_get(self, mar, request):\n if not mar.viewed_user_auth:\n raise exceptions.NoSuchUserException(request.groupName)\n group_id = mar.viewed_user_auth.user_id\n group_settings = self._services.usergroup.GetGroupSettings(\n mar.cnxn, group_id)\n member_ids, owner_ids = self._services.usergroup.LookupAllMembers(\n mar.cnxn, [group_id])\n (owned_project_ids, membered_project_ids,\n contrib_project_ids) = self._services.project.GetUserRolesInAllProjects(\n mar.cnxn, mar.auth.effective_ids)\n project_ids = owned_project_ids.union(\n membered_project_ids).union(contrib_project_ids)\n if not permissions.CanViewGroupMembers(\n mar.perms, mar.auth.effective_ids, group_settings, member_ids[group_id],\n owner_ids[group_id], project_ids):\n raise permissions.PermissionException(\n 'The user is not allowed to view this group.')\n\n member_ids, owner_ids = self._services.usergroup.LookupMembers(\n mar.cnxn, [group_id])\n\n member_emails = list(self._services.user.LookupUserEmails(\n mar.cnxn, member_ids[group_id]).values())\n owner_emails = list(self._services.user.LookupUserEmails(\n mar.cnxn, owner_ids[group_id]).values())\n\n return api_pb2_v1.GroupsGetResponse(\n groupID=group_id,\n groupSettings=api_pb2_v1_helpers.convert_group_settings(\n request.groupName, group_settings),\n groupOwners=owner_emails,\n groupMembers=member_emails)", "def test_get_device_groups1(self):\n pass", "def get_public_bags(store):\n usersign = {'name': 'GUEST', 'roles': []}\n for bag in store.list_bags():\n try:\n bag = store.get(bag)\n bag.policy.allows(usersign, 'read')\n yield bag\n except PermissionsError:\n pass" ]
[ "0.7512295", "0.6589516", "0.65174836", "0.6486006", "0.63847315", "0.63510704", "0.63013774", "0.63013774", "0.62872154", "0.628441", "0.6190389", "0.61468303", "0.61449814", "0.61140037", "0.61140037", "0.6090746", "0.6036777", "0.60114354", "0.6006546", "0.59940183", "0.59654236", "0.59639245", "0.593845", "0.59331065", "0.59283227", "0.5925042", "0.5922163", "0.5920583", "0.59110636", "0.59110636", "0.59110636", "0.5901571", "0.5895076", "0.5895076", "0.5889232", "0.58764076", "0.58565545", "0.5828596", "0.5817554", "0.57988745", "0.5780764", "0.5780142", "0.5766131", "0.57609135", "0.5746399", "0.5744895", "0.5738177", "0.57193923", "0.57178634", "0.5714573", "0.5714058", "0.571391", "0.5708757", "0.5705848", "0.5691556", "0.5681597", "0.56810844", "0.56634367", "0.56590235", "0.56531394", "0.5650903", "0.56502765", "0.5640915", "0.56372267", "0.5631253", "0.56237125", "0.5616458", "0.56129104", "0.56088245", "0.5605782", "0.56029975", "0.55997795", "0.5594375", "0.5586635", "0.5584813", "0.55819315", "0.55713713", "0.557066", "0.5569578", "0.5569578", "0.5568094", "0.55632526", "0.55538106", "0.5550961", "0.5549063", "0.554297", "0.5542117", "0.55399257", "0.5530078", "0.5525771", "0.5525018", "0.55134463", "0.55124545", "0.550867", "0.55009526", "0.54982513", "0.5494224", "0.5481141", "0.5479748", "0.5479558" ]
0.7552889
0
Gets underprivileged groups for fair training
def _get_underprivileged_groups(self):
    if self.underprivileged_groups == "auto":
        return []
    else:
        return deepcopy(self.underprivileged_groups)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_privileged_groups(self):\n if self.privileged_groups == \"auto\":\n return []\n else:\n return deepcopy(self.privileged_groups)", "def get_groups_using_malware():\n global groups_using_malware\n\n if not groups_using_malware:\n groups_using_malware = rsh.groups_using_malware(get_srcs())\n \n return groups_using_malware", "def test_aws_service_api_security_groups_get(self):\n pass", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def get_pingroups(self):\n return self.groups[:]", "def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def _identify_groups_for_user(user):\n groups = []\n for group in user.groups.all():\n if group.name == 'WMT16' \\\n or group.name.lower().startswith('wmt') \\\n or group.name.startswith('eng2') \\\n or group.name.endswith('2eng'):\n continue\n \n if not group in groups:\n groups.append(group)\n \n return groups", "def test_get_device_groups(self):\n pass", "def getGroups():\r\n return Group.getGroups()", "def get_groups_using_technique():\n global groups_using_technique\n\n if not groups_using_technique:\n groups_using_technique = rsh.groups_using_technique(get_srcs())\n \n return groups_using_technique", "def available_groups(cls):\n raise NotImplementedError", "def test_groups_get(self):\n pass", "def test_groups_get(self):\n pass", "def test_get_resource_group_by_moid(self):\n pass", "def groups(self):\n\n return ('train', 'dev', 'eval')", "def test_users_groups_get(self):\n pass", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def get_free_standins(group):", "def list_secgroups(self, name=None):", "def test_get_group(self):\n pass", "def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm", "def get_groups(self, username):\n return []", "def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)", "def test_api_v1_groups_get(self):\n pass", "def get_groups():\n\n # FUTURE: Properly reutrn error, Mongo is giving it's own\n if current_user.groups:\n return Response(response=json.dumps([g.to_dict() for g in current_user.groups]), status=200, mimetype=\"application/json\")\n else:\n return return_json_error('No groups assigned to', 500)", "def groups(self):\n return []", "def granted_groups(self):\n return [\n g\n for g in Group.objects.filter()\n if ManagedObject.objects.filter(GroupAccess.Q(g) & Q(id=self.id)).exists()\n ]", "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o 
cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def get_security(self):\n users = find_root(self)['users']\n userids_and_groups = []\n for userid in self._groups:\n if userid in users:\n userids_and_groups.append({'userid':userid, 'groups':self.get_groups(userid)})\n return userids_and_groups", "def GetGroupMembers(self, group):\n return []", "def get_groups(self):\n return Client._get(self)", "def test_get_device_groups1(self):\n pass", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def groups(self):\n return self.get_data(\"groups\")", "def get_groups_using_tool():\n global groups_using_tool\n\n if not groups_using_tool:\n groups_using_tool = rsh.groups_using_tool(get_srcs())\n\n return groups_using_tool", "def get_all_groups(self):\n return self.groups + ['all']", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def test_get_resource_group_member_by_moid(self):\n pass", "def test_get_resource_group_list(self):\n pass", "def trainable_push_groups(self):\n return [group for group in self.push_groups if group.trainable]", "def load_security_groups(self):\n url = self.lookup(\"security_groups_url\")\n groups = self._fetcher.get_entities(url)\n if groups is None:\n return\n\n group_names = [group['name']\n for group in groups if group['running_default'] is False]\n # at this point the group_names contain all the running groups in addition\n # to the groups assigned to this space.\n # That's why we need to remove the duplicates\n group_names = list(set(group_names))\n\n for name in group_names:\n self._security_groups.append({'name': name})", "def get_relevant_perm_groups(self):\n\n groups = Group.objects.filter(Q(name=\"everyone\") | Q(name=self.admin_group_name()) | Q(name=self.participants_group_name()))\n return groups", "def get_groups(self, env, token, memcache_client=None):\n groups = None\n key = '%s/token/%s' % (self.reseller_prefix, token)\n cached_auth_data = memcache_client and memcache_client.get(key)\n if cached_auth_data:\n start, expiration, groups = cached_auth_data\n if time() - start > expiration:\n groups = None\n\n headers = {}\n if env.get('HTTP_AUTHORIZATION'):\n groups = None\n headers[\"Authorization\"] = env.get('HTTP_AUTHORIZATION')\n\n if not groups:\n with Timeout(self.timeout):\n conn = http_connect(self.auth_host, self.auth_port, 'GET',\n '%stoken/%s' % (self.auth_prefix, token),\n headers, ssl=self.ssl)\n\n resp = conn.getresponse()\n resp.read()\n conn.close()\n if resp.status // 100 != 2:\n return None\n expiration = float(resp.getheader('x-auth-ttl'))\n groups = resp.getheader('x-auth-groups')\n if memcache_client:\n memcache_client.set(key, (time(), expiration, groups),\n timeout=expiration)\n\n if env.get('HTTP_AUTHORIZATION'):\n account, user, sign = \\\n env['HTTP_AUTHORIZATION'].split(' ')[-1].split(':')\n cfaccount = 
resp.getheader('x-auth-account-suffix')\n path = env['PATH_INFO']\n env['PATH_INFO'] = \\\n path.replace(\"%s:%s\" % (account, user), cfaccount, 1)\n\n return groups", "def get_psample_list_groups(dut):\n return st.show(dut, \"sudo psample --list-groups\", skip_tmpl=True)", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def users_groups():\n if request.method == \"GET\":\n query = {\"token\": ciconnect_api_token, \"globus_id\": session[\"primary_identity\"]}\n # Get user info to derive unix name\n user = get_user_info(session)\n unix_name = user[\"metadata\"][\"unix_name\"]\n # Get user's group membership info based on session unix name\n users_group_memberships = get_user_group_memberships(session, unix_name)\n\n multiplexJson = {}\n group_membership_status = {}\n for group in users_group_memberships:\n if group[\"state\"] not in [\"nonmember\"]:\n group_name = group[\"name\"]\n group_query = (\n \"/v1alpha1/groups/\" + group_name + \"?token=\" + query[\"token\"]\n )\n multiplexJson[group_query] = {\"method\": \"GET\"}\n group_membership_status[group_query] = group[\"state\"]\n # POST request for multiplex return\n multiplex = get_multiplex(multiplexJson)\n\n users_groups = []\n for group in multiplex:\n if (\n session[\"url_host\"][\"unix_name\"]\n in (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"])\n ) and (\n len(\n (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"]).split(\n \".\"\n )\n )\n > 1\n ):\n users_groups.append(\n (\n json.loads(multiplex[group][\"body\"]),\n group_membership_status[group],\n )\n )\n # users_groups = [group for group in users_groups if len(group['name'].split('.')) == 3]\n\n # Query user's pending project requests\n pending_project_requests = get_user_pending_project_requests(unix_name)\n # Check user's member status of root connect group\n connect_group = session[\"url_host\"][\"unix_name\"]\n user_status = get_user_connect_status(unix_name, connect_group)\n\n domain_name = domain_name_edgecase()\n\n with open(\n brand_dir\n + \"/\"\n + domain_name\n + \"/form_descriptions/group_unix_name_description.md\",\n \"r\",\n ) as file:\n group_unix_name_description = file.read()\n\n return render_template(\n \"users_groups.html\",\n groups=users_groups,\n project_requests=pending_project_requests,\n user_status=user_status,\n group_unix_name_description=group_unix_name_description,\n )", "def get_all(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving groups\", \"/sysaccount/groups/v1\")", "def get_techniques_used_by_groups():\n global techniques_used_by_groups\n\n if not techniques_used_by_groups:\n techniques_used_by_groups = rsh.techniques_used_by_groups(get_srcs())\n\n return techniques_used_by_groups", "def get_current_grp():\n return get_group_grp(os.getgid())", "def get_malware_used_by_groups():\n global malware_used_by_groups\n\n if not malware_used_by_groups:\n malware_used_by_groups = rsh.malware_used_by_groups(get_srcs())\n\n return malware_used_by_groups", "def capacitygroup_group():", "def give_group(self, key):\n return self._grps[key]", "def test_get_groups_all(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.set_perms(['Perm1', 'Perm2'], object0)\n group0.set_perms(['Perm1', 'Perm3'], object1)\n group1.set_perms(['Perm2'], object1)\n \n 
# no perms\n self.assertFalse(group1 in get_groups_all(object0, ['Perm1']))\n \n # has perms, but not the right one\n self.assertFalse(group0 in get_groups_all(object0, ['Perm3']))\n \n # has one perm, but not all\n self.assertFalse(group0 in get_groups_all(object0, ['Perm1','Perm3']))\n self.assertFalse(group0 in get_groups_all(object1, ['Perm1','Perm2']))\n \n # has single perm\n self.assert_(group0 in get_groups_all(object0, ['Perm1']))\n self.assert_(group0 in get_groups_all(object0, ['Perm2']))\n self.assert_(group1 in get_groups_all(object1, ['Perm2']))\n \n # has multiple perms\n self.assert_(group0 in get_groups_all(object0, ['Perm1','Perm2']))\n self.assert_(group0 in get_groups_all(object1, ['Perm1','Perm3']))", "def getUserGroups(self, user):\n return [gu[0] for gu in grp.getgrall() if user in gu[3]]", "def get_groups(self):\n return [self.primary_group] + list(self.secondary_groups)", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def RetrieveWorkerInGroup(**argd):\n checkSign = argd[\"nsid\"] + \",\" + argd[\"renid\"]\n token = EncryptUtil.DecodeURLSafeBase64(argd[\"token\"])\n try:\n tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)\n except:\n tokenRet = False\n if tokenRet is False:\n return CGateway._UnauthorizedServiceResponse(token)\n flag1, ret1 = CGateway.core.RetrieveHumanInGroup(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd[\"groupName\"])\n flag2, ret2 = CGateway.core.RetrieveAgentInGroup(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd[\"groupName\"])\n return CGateway._DumpResponse(ret1 + ret2)", "def groups(self):\r\n return users.Groups(self)", "def test_groups_group_users_get(self):\n pass", "def test_groups_group_users_get(self):\n pass", "def groups(self):\r\n return resources.Groups(self)", "def get(self, *args):\n return _libsbml.ListOfGroups_get(self, *args)", "def _get_check_groups(self, group=None):\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups", "def user_groups(username, htgroup_fn, strict=True):\n groups = []\n for group_name, users in read_groups(htgroup_fn, strict=strict).items():\n if username in users:\n groups.append(group_name)\n return groups", "def test_get_resource_group_member_list(self):\n pass", "def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist", "def security_groups(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"security_groups\")", "def _get_lsp_config_frr_admin_groups(self):\n return self.__lsp_config_frr_admin_groups", "def get_groups(self, username):\n groups = []\n for group in grp.getgrall():\n if username in group.gr_mem:\n groups.append(group.gr_name)\n\n return groups", "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def test_get_groups(self):\n response = self.client.get_groups()\n uri, args = response[\"uri\"].split(\"?\")\n\n 
self.assertEqual(response[\"method\"], \"GET\")\n self.assertEqual(uri, \"/admin/v1/groups\")\n self.assertEqual(util.params_to_dict(args), {\"account_id\": [self.client.account_id]})", "def list_groups(self):\n return self.get_admin(\"groups\")", "def get_group_names(self):\r\n return self.groups.keys()", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def get_group_permissions (self):\n return [] # likewise with the other permission defs", "def pull_groups(self, org):\n pass", "def testGroupsNotReturnedByEnumerateUsers(self):\n results = self.pas.searchUsers()\n resultIds = [a[\"id\"] for a in results]\n self.assertFalse(\"group1\" in resultIds)", "def pull_all_rhds_group(self):\n return self.ldap_connection.search_s(\"ou=managedGroups,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE)", "def test_user_group_controller_list(self):\n pass", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def list_groups(access_token):\n request_url = OKTA_URL + \"api/v1/groups\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request", "def get_map_groups(user_map):\n pg = user_map.permission_group_user_map.all()\n gids = list(pg.values_list('group', flat=True))\n if len(gids) > 0:\n return Group.objects.filter(id__in=gids)\n\n return Group.objects.filter(name=DEFAULT_GROUP)", "def test_group_is_private(self):\n group = mommy.make('groups.Group', private=True)\n thread = self.create_thread(group=group)\n result = Thread.public.by_group(group)\n self.assertNotIn(thread, result)", "def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]", "def groups(user=None):\n command = \"groups {0}\".format(user) if user else \"groups\"\n system_command(command)", "def active_groups_anon(self):\n groups = self.active_groups()\n return [group for group in groups if group.anon_read]", "def groupsChanged(self):\n # Get the list of groups for the present user according to\n # the checklist.\n nglist = []\n for r in self.liststore:\n if (r[1] and (r[0] != self.gidnm)):\n nglist.append(r[0])\n if (gui.getUserGroups(gui.currentUser) != nglist):\n return nglist\n else:\n return None", "def test_06_self_cannot_upgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "def list_user_groups(self, token):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n groups = dataBase['userGroups']\n groupList = list()\n for group in groups:\n members = groups[group]['members']\n owners = groups[group]['owners']\n if requestUser in members or requestUser in owners:\n 
groupList.append(group)\n return groupList", "def api_groups(self):\n return self._api_groups", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")", "def get_voting_group_for(userid):", "def test_get_groups_3(\n self, management_client, internal_client, inventory_attributes\n ):\n\n did = \"some-device-id\"\n internal_client.create_device(did, inventory_attributes)\n for i in range(10):\n group = management_client.group(group=\"group\" + str(i))\n management_client.addDeviceToGroup(group, did)\n\n assert len(management_client.getAllGroups()) == 1", "def readGroups(self):\n\t\tgroups = self._fileSystem.readGroups()\n\t\tif groups is None:\n\t\t\treturn\n\t\treturn groups", "def getNodeLVMGroups(self,node):\n data = self.connect('get','nodes/%s/scan/lvm' % (node),None)\n return data" ]
[ "0.74663794", "0.6593482", "0.6520235", "0.65086764", "0.65086764", "0.6437184", "0.6431571", "0.6334094", "0.61589926", "0.6144109", "0.6134614", "0.6120121", "0.60976017", "0.60939455", "0.60939455", "0.6093541", "0.60102457", "0.60085875", "0.59747267", "0.5969062", "0.59649074", "0.5951997", "0.59271246", "0.5921322", "0.5905782", "0.5903327", "0.5902226", "0.5870792", "0.5867802", "0.58635926", "0.58566886", "0.58506244", "0.5834576", "0.58330476", "0.5831491", "0.5776808", "0.5776126", "0.5764601", "0.57597476", "0.57557786", "0.5735616", "0.5730046", "0.57277614", "0.5726236", "0.5717723", "0.5715284", "0.56994003", "0.56939805", "0.56894124", "0.56752896", "0.5663992", "0.5656273", "0.5652241", "0.5636954", "0.563475", "0.5628824", "0.56093115", "0.56071", "0.56071", "0.56071", "0.560367", "0.55783707", "0.5578361", "0.5578361", "0.55774015", "0.5576021", "0.55684024", "0.55649924", "0.55231524", "0.55188066", "0.5511155", "0.55096227", "0.550636", "0.550329", "0.5496681", "0.5492093", "0.5487497", "0.546315", "0.546315", "0.54584026", "0.545661", "0.544947", "0.5446473", "0.5438882", "0.54388314", "0.5436783", "0.5432808", "0.5429596", "0.54227924", "0.54171485", "0.5406535", "0.5400508", "0.539595", "0.5395571", "0.5386632", "0.5386632", "0.5378314", "0.53743964", "0.53673124", "0.5365838" ]
0.76907
0
Handle add host request
def __add_host(self, host_form):
    try:
        host_object = Host.objects.get(
            host_name=host_form.cleaned_data['host_name']
        )
        for field in host_form.cleaned_data:
            setattr(
                host_object,
                field,
                host_form.cleaned_data[field]
            )
        host_object.save()
        return HttpResponseRedirect(reverse('log_collector:index'))
    except errors.ObjectDoesNotExist:
        return self.form_valid(host_form)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_add_host(request, address):\n\n request.setdefault('headers', {})\n request['headers'].setdefault('Host', address)\n\n return request", "def add_host_entry(ip, hostname, domain):\n host_line = ip+\" \"+hostname+\".\"+domain+\" \"+hostname\n\n # Only add entry if it does not exist already. We don't want warnings about\n # grep not finding the entry, as that's to be expected.\n with hide(\"warnings\"), settings(warn_only=True):\n if run(\"grep \\\"\"+host_line+\"\\\" /etc/hosts\").failed:\n sudo(\"echo \"+host_line+\" >> /etc/hosts\")", "def headers_add_host(headers, address):\n\n headers.setdefault('Host', address)\n\n return headers", "def append_allowed_hosts(self, hostname):\r\n settings.ALLOWED_HOSTS.append(hostname)\r\n self.addCleanup(settings.ALLOWED_HOSTS.pop)", "def add_host(self, name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "def handle_host(self, host):\n LOG.info('FakeHandler: handle host %s' % host)", "def AddHost(parser):\n parser.add_argument(\n '--host',\n help=(\n \"Cloud SQL user's hostname expressed as a specific IP address or\"\n ' address range. `%` denotes an unrestricted hostname. Applicable'\n ' flag for MySQL instances; ignored for all other engines. Note, if'\n ' you connect to your instance using IP addresses, you must add your'\n ' client IP address as an authorized address, even if your hostname'\n ' is unrestricted. For more information, see [Configure'\n ' IP](https://cloud.google.com/sql/docs/mysql/configure-ip).'\n ),\n )", "def add_host(self, group, host):\n if group not in self.inventory:\n self.add_inventory_group(group)\n\n if host not in self.inventory[group]['hosts']:\n self.inventory[group]['hosts'].append(host)\n return", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {\"http.server_name\": hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def add(self, host, **kwargs):\n self.configs_[0][1].add(host, **kwargs)", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {SpanAttributes.HTTP_SERVER_NAME: hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def process_request(self, request):\n\n try:\n request.get_host()\n except DisallowedHost:\n if not request.META['HTTP_HOST'] == '45.56.115.140':\n logger.critical(request.META)", "def add_host():\n # Attempt to recieve POST data\n name = None\n ip = None\n mac = None\n state = None\n if not request.json:\n abort(400)\n try:\n name = request.json.get('deviceName')\n ip = request.json.get('ip')\n mac = request.json.get('mac')\n except:\n abort(400)\n try: # Try to get the state, otherwise default it to off and let the daemon clean up\n state = request.json.get('state')\n if state == None:\n state = 'off'\n 
except:\n state = 'off'\n # Perform the transaction itself\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n ret = hosts.add(db, name, ip, mac, state)\n ret = {'sid': ret}\n db.commit()\n ret = {'add': ret}\n return jsonify(ret)", "def add_host_to_checkmk(hostname, hostlabels):\n\n logging.debug('going to add %s with hostlabels %s' % (hostname, hostlabels))\n\n checkmk_api_url = config['checkmk_api_url']\n checkmk_api_username = config['checkmk_api_username']\n checkmk_api_secret = config['checkmk_api_secret']\n checkmk_default_folder = config['checkmk_default_folder']\n checkmk_default_location = config['checkmk_default_location']\n checkmk_puppetdb_label = config['checkmk_puppetdb_label']\n\n hostlabels['from_puppetdb'] = checkmk_puppetdb_label\n\n # Determine if host is dual stacked v4/v6 and include ip-v4v6\n # address_family if so, else leave address_family off to use default\n try:\n d = dns.resolver.resolve(hostname, 'AAAA')\n logging.debug('-- host appears dual stacked, adding ip-v4v6')\n payload = {'request': json.dumps({\n 'hostname': hostname,\n 'folder': checkmk_default_folder,\n 'attributes': {\n 'tag_location': checkmk_default_location,\n 'tag_address_family': 'ip-v4v6',\n 'labels': hostlabels\n }\n })}\n except Exception as e:\n logging.debug('-- host not dual stacked')\n payload = {'request': json.dumps({\n 'hostname': hostname,\n 'folder': checkmk_default_folder,\n 'attributes': {\n 'tag_location': checkmk_default_location,\n 'labels': hostlabels\n }\n })}\n\n logging.debug('-- adding host %s', hostname)\n r = requests.post(\"%s?action=add_host&_username=%s&_secret=%s\" % (checkmk_api_url, checkmk_api_username, checkmk_api_secret), data=payload)\n logging.debug('-- got resp code = %d' % r.status_code)\n logging.debug('-- got resp text = %s' % r.text)\n r_json = json.loads(r.text)\n\n # Successful add_host gives response of {\"result\": null, \"result_code\": 0}\n if r_json['result_code'] == 0 and r_json['result'] is None:\n logging.info('added host %s successfully', hostname)\n else:\n logging.warn('failed to add host %s', r_json['result'])", "def add_hosts(self, hosts):\n for host in hosts:\n if host not in self.__hosts__:\n self.__hosts__.append(KnownHostsHost(host))", "def getHost():", "def getHost():", "def add_or_remove_host(self, event):\n try:\n host = self.caller.search(self.lhs).Dominion\n except AttributeError:\n return\n if event:\n if host == event.main_host:\n raise self.CalCmdError(\"The main host cannot be removed.\")\n if host in event.hosts:\n event.change_host_to_guest(host)\n msg = \"Changed host to a regular guest. Use /uninvite to remove them completely.\"\n else:\n event.add_host(host)\n msg = \"%s added to hosts.\" % host\n else:\n hosts = self.project[\"hosts\"]\n if host.id in hosts:\n hosts.remove(host.id)\n if host.id not in self.project[\"invites\"]:\n self.project[\"invites\"].append(host.id)\n msg = \"Changed host to a regular guest. 
Use /uninvite to remove them completely.\"\n else:\n hosts.append(host.id)\n if host.id in self.project[\"invites\"]:\n self.project[\"invites\"].remove(host.id)\n msg = \"%s added to hosts.\" % host\n self.msg(msg)", "def register_router(self, hostname, expire=-1):", "def add_host(self, ipv4, rem_dpid, rem_port):\n assert(ipv4 is not None)\n assert(rem_dpid is not None)\n assert(rem_port is not None)\n LOG.info(\"Try to add host=%s -> (%s:%d)\" % (ipv4, rem_dpid, rem_port))\n\n ip_ = convert_ipv4_to_int(ipv4)\n self.add_node(ip_)\n self.add_link(ip_, 0, rem_dpid, rem_port)\n self.add_link(rem_dpid, rem_port, ip_, 0)", "def host_action(self, host, action):\n url = '/os-hosts/{0}/{1}'.format(host, action)\n return self._get(url, response_key='host')", "def host(self, host) :\n\t\ttry :\n\t\t\tself._host = host\n\t\texcept Exception as e:\n\t\t\traise e", "def add(self, host, **kwargs):\n if host in self.hosts_:\n raise ValueError(\"Host %s: exists (use update).\" % host)\n self.hosts_.add(host)\n self.lines_.append(ConfigLine(line=\"\", host=None))\n self.lines_.append(ConfigLine(line=\"Host %s\" % host, host=host, key=\"Host\", value=host))\n for k, v in kwargs.items():\n if type(v) not in [list, tuple]:\n v = [v]\n mapped_k = _remap_key(k)\n for value in v:\n new_line = self._new_line(mapped_k, value)\n self.lines_.append(ConfigLine(line=new_line, host=host, key=mapped_k, value=value))\n self.lines_.append(ConfigLine(line=\"\", host=None))", "def add(self, **kwargs):\n\n return self._doAdd(\n self.getResourceManager().getSdk().hosts,\n **kwargs\n )", "def post(self, request, *args, **kwargs):\n errors = validators.StackAddRemoveHostsValidator(request).validate()\n if errors:\n raise BadRequest(errors)\n\n action = request.DATA['action']\n\n if action == 'add':\n return self.add_hosts(request)\n elif action == 'remove':\n return self.remove_hosts(request)", "def _handle_HostEvent (self, event):\n self.host_alive.append(event.entry) \n print type(event.entry).__name__", "def set_host(self, host: str) -> None:\n _LOGGER.debug(\"Setting host to %s\", host)\n host_url = urlparse(host)\n self.scheme = host_url.scheme or \"http\"\n self.host = host_url.netloc or host_url.path\n self.base_url = f\"{self.scheme}://{self.host}\"\n self.api_url = f\"{self.base_url}/apps/api/{self.app_id}\"", "def insert_host(self, host):\n if host['host'] and host['user'] and host['passw']:\n hosts = Config().hosts\n cred = {'username': host['user'], 'password': host['passw']}\n hosts[host['host']] = cred\n Config().hosts = hosts", "def add(self, hostname, hostalias, hostip, hosttemplate, pollername=None, hgname=None):\n return super(HostTemplate, self).add(hostname, hostalias, hostip, hosttemplate, '', [])", "def handle_request(self,host,path,data=b''):\n\t\tself.response_code(5,\"Request handler not implemented\")", "def modify(request, host_id):\n POST = HostForm.filter_querydict(request.user, request.POST)\n host = get_host_or_404(request.user, pk=host_id)\n prefix = str(host_id)\n if request.is_ajax():\n template = \"clariadmin/ajax_host.html\"\n else:\n template = \"clariadmin/host.html\"\n \n remote_addr = get_request_remote_addr(request)\n \n add_fields = AdditionnalFieldForm(host=host, prefix=prefix)\n if POST:\n form = HostForm(request.user, remote_addr,\n POST, instance=host, prefix=prefix)\n if POST.get(\"delete\", False):\n form.delete()\n return redirect('list_hosts')\n if form.is_valid():\n host, add_fields = form.save(POST=POST, prefix=prefix)\n redir = POST.get('submit_button', False)\n if redir == 
'new':\n return redirect('new_host')\n elif redir == 'save':\n pass\n elif redir == 'return':\n return redirect('list_hosts')\n else:\n form = HostForm(request.user, remote_addr,\n instance=host, prefix=prefix)\n form.log_action(u\"consulté\")\n return render_to_response(template, {\n \"form\": form,\n 'additionnal_fields': add_fields,\n 'prefix': prefix,\n 'ajax': request.is_ajax(),\n \"host\": host}, context_instance=RequestContext(request))", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def AddHostFlag(parser, required=False):\n help_text = \"\"\"\\\n IP or hostname of the database.\n When `--psc-service-attachment` is also specified, this field value should be:\n 1. For Cloud SQL PSC enabled instance - the dns_name field (e.g <uid>.<region>.sql.goog.).\n 2. For Cloud SQL PSA instance (vpc peering) - the private ip of the instance.\n \"\"\"\n parser.add_argument('--host', help=help_text, required=required)", "def test_add_hostname(self):\n hostname = 'test123.com'\n info = self.api.add_hostname(hostname, tags=['asd'])\n self.assertEqual(info['value'], hostname)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def add_host_entries(hosts_file=None):\n from fabric.contrib.files import append\n if hosts_file:\n try:\n hosts = open(hosts_file)\n for line in hosts:\n append(\"/etc/hosts\", line.rstrip(\"\\n\"), use_sudo=True)\n except IOError:\n print \"ERROR: defined hosts file is missing!\"", "def check_add_hosts(self, export_details):\n\n playbook_host_dict = self.create_current_host_dict_playbook()\n add_host_dict = dict()\n host_type_list = ['no_access_hosts', 'read_only_hosts',\n 'read_write_hosts', 'read_only_root_hosts',\n 'read_write_root_hosts']\n\n for host_type in host_type_list:\n if playbook_host_dict[host_type]:\n hosts_to_add = list()\n ipv4_hosts, ipv6_hosts, fqdn_hosts = \\\n self.get_export_hosts(export_details[host_type])\n for host in playbook_host_dict[host_type]:\n version = check_ipv4_ipv6_fqdn(host)\n\n # Check if host is FQDN/Netgroup or IP\n if version:\n if version == 4:\n # IPv4 host is provided\n ipv4_host = self.get_ipv4_host(host)\n # Check if given host is member of already added\n # network\n if ipv4_host not in ipv4_hosts:\n if str(ipv4_host) not in hosts_to_add:\n hosts_to_add.append(str(ipv4_host))\n else:\n # IPv6 host is provided\n ipv6_host = self.get_ipv6_host(host)\n # Check if given host is member of already added\n # network\n if ipv6_host not in ipv6_hosts:\n if str(ipv6_host) not in hosts_to_add:\n hosts_to_add.append(str(ipv6_host))\n else:\n # FQDN/Netgroup is provided\n if host not in fqdn_hosts:\n if host not in hosts_to_add:\n hosts_to_add.append(host)\n if hosts_to_add:\n if host_type == \"read_only_root_hosts\":\n export_details[host_type].extend(hosts_to_add)\n add_host_dict['read_only_root_hosts'] = \\\n export_details[host_type]\n else:\n add_host_dict['add_' + host_type] = hosts_to_add\n\n LOG.info(\"Host list to add: %s\", add_host_dict)\n return add_host_dict", "def handle_hostname(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<ipnr>')\n return\n try:\n hostname = socket.gethostbyaddr(item)\n ievent.reply(hostname[0])\n except:\n ievent.reply(\"can't match \" + str(item))", "def sethost(self, host):\n self.__host = host", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def add_host_and_groups(self, 
host_name, group_paths = None):\n if len([h for h in self.hosts if h.name == host_name]) > 0:\n raise ValueError('Failed to add host \\'%s\\'. Host with the same name already exists.' % host_name)\n if not group_paths or len(group_paths) == 0:\n group_paths = ['all']\n host = Host(host_name)\n self.hosts.append(host)\n for group_path in group_paths:\n group = self.get_or_add_group(group_path)\n group.hosts.append(host)", "def add(self, host, auth, conn):\n self.conns[(host, auth)] = conn", "def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n (\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\n\"\n \"Host: tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta1-tech.com\\r\\n\\r\\n\"\n ),\n ]\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\", requests=requests\n )\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")", "def HandleAddVsRequest(self, request, response):\n default_vs_cache_level = \"2\"\n vs_name = request.GetParameter(constants.VS_NAME)\n vs_path = request.GetParameter(constants.VS_URL)\n vs_ssl = request.GetParameter(constants.VS_SSL)\n vs_cache_level = request.GetParameter(constants.VS_CACHE_LEVEL)\n if not vs_name:\n raise exceptions.PublishServeException(\"Missing Virtual Host name.\")\n\n if not vs_path:\n vs_path = \"/%s\" % vs_name\n\n if not vs_cache_level:\n vs_cache_level = default_vs_cache_level\n\n self._publish_helper.HandleAddVsRequest(\n vs_name, vs_path, bool(vs_ssl), vs_cache_level, response)", "def eff_request_host(request):\n erhn = req_host = request_host(request)\n if req_host.find(\".\") == -1 and not cookiejar.IPV4_RE.search(req_host):\n erhn = req_host + \".local\"\n return req_host, erhn", "def validate_host(request):\n log.debug(\n \"Inside validate_host. 
Request Data: %s\" % request.data)\n if request.method == 'POST':\n try:\n status = usm_wrapper_utils.check_host_ssh_auth(\n request.data['host'],\n request.data['port'],\n request.data['fingerprint'],\n request.data['username'],\n request.data['password'])\n if status:\n return Response({'message': 'Success'}, status=200)\n else:\n return Response({'message': 'Failed'}, status=417)\n except Exception, e:\n log.exception(e)\n return Response(\n {'message': 'Failed'}, status=417)\n else:\n return Response({})", "def Host(self, h):\r\n\r\n self.host = h\r\n return self", "def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.add(args)", "def dispatch_host(name, data):\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)", "def host(home_dir: str, addr: tuple, subdir_post: str = None):\n if subdir_post is None:\n subdir_post = home_dir\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(addr)\n s.listen(5)\n while True:\n c, addr = s.accept()\n threading.Thread(\n target=_handle_request, args=(c, addr, home_dir, subdir_post)\n ).start()", "def post(self, host_name, group_name): # noqa\n\n valid_parms = ['others']\n group_list = []\n group_list.append(group_name)\n\n args = request.args.to_dict()\n if args:\n logger.debug(\"additional args received\")\n if all(p in valid_parms for p in args.keys()):\n if 'others' in args:\n group_list.extend(args['others'].split(','))\n else:\n r = APIResponse()\n r.status = 'INVALID'\n r.msg = \"Supported additional parameters are \" \\\n \"{}\".format(','.join(valid_parms))\n return r.__dict__, self.state_to_http[r.status]\n\n for group in group_list:\n logger.debug(\"Adding host {} to group {}\".format(host_name, group))\n response = add_host(host_name, group)\n if response.status != 'OK':\n break\n\n return response.__dict__, self.state_to_http[response.status]", "def append(self, hostinfo: HostInfo) -> None:\n\n self.hostinfo_list.append(hostinfo)", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def insert_host(self, info: 'vrmjobs.HostInfo') -> bool:\n with self.lock:\n try:\n host = self._check_host_existence(info.hostname)\n\n if not host:\n ports = []\n for p in info.ports:\n ports.append({\"daemon\": p.daemon, \"port\": p.port})\n\n self.hosts.insert({\"hostname\": info.hostname,\n \"inet_addr\": info.inet_addr,\n \"ports\": ports,\n \"type\": info.type.name,\n \"latest_recv\": datetime.now().strftime(self.time_format)})\n return True\n return False\n except Exception as err:\n raise InsertError('Cannot insert new host {}'.format(str(info)), err)", "def at_added(self, host):\n\n if self.host:\n if self.host == host:\n return\n else:\n raise ComponentRegisterError(\"Components must not register twice!\")\n\n self.host = host", "def test_post_accepts_known_host(self, publish_mock: mock.Mock) -> None:\n\n def side_effect(*args: str, **_: str) -> Any:\n if args[0] == \"registry:first:value\":\n return [\"00:00:00:00:00\"]\n if args[0] == \"app_url\":\n return [\"/\"]\n if args[0] == \"jinja:render\":\n return [\"\"]\n return mock.DEFAULT\n\n publish_mock.side_effect = side_effect\n\n response = self.request(\"/\", method=\"POST\", host=\"host1\")\n\n self.assertEqual(response.code, 303)", "def updateHosts(request):\n\n updater = HostUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def _add_node_to_etc_hosts(self):\n image = 
'alpine:latest'\n command = 'echo \"{} {} # clusterdock\" >> /etc/hosts'.format(self.ip_address,\n self.fqdn)\n volumes = {'/etc/hosts': {'bind': '/etc/hosts', 'mode': 'rw'}}\n\n logger.debug('Adding %s to /etc/hosts ...', self.fqdn)\n client.containers.run(image=image,\n command=[self.execute_shell, '-c', command],\n volumes=volumes,\n remove=True)", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def insert_host_overload(hostname, overload):\n IMPL.insert_host_overload(hostname, overload)", "def new(request, from_host=False):\n POST = HostForm.filter_querydict(request.user, request.POST)\n add_fields = None\n\n remote_addr = get_request_remote_addr(request)\n\n if POST:\n form = HostForm(request.user, remote_addr, POST)\n if form.is_valid():\n host, add_fields = form.save(POST=POST)\n redir = POST.get('submit_button', False)\n if redir == 'new':\n form = HostForm(request.user, remote_addr)\n elif redir == 'save':\n return redirect(host)\n elif redir == 'return':\n return redirect('list_hosts')\n else:\n if from_host:\n from_host = get_host_or_404(request.user, pk=from_host)\n inst, comp = from_host.copy_instance()\n form = HostForm(request.user, remote_addr, instance=inst)\n form.log_action(u\"consulté\", from_host)\n add_fields = AdditionnalFieldForm(comp, host=inst)\n else:\n form = HostForm(request.user, remote_addr)\n return render_to_response('clariadmin/host.html', {\n 'form': form,\n 'prefix': '8',\n 'additionnal_fields': add_fields},\n context_instance=RequestContext(request))", "def host(self, host: str):\n\n self._host = host", "def setHost(host, port, ssl=0):", "def connect_host():\n host_id = request.form[constants.HOST_ID]\n host_alias = request.form[constants.HOST_ALIAS]\n auth_method = request.form[constants.AUTH_METHOD]\n host_auth_user = request.form[constants.HOST_AUTH_USER]\n host_auth_password = request.form[constants.HOST_AUTH_PASSWORD]\n\n req = dict()\n req[constants.HOST_ID] = host_id\n req[constants.HOST_ALIAS] = host_alias\n req[constants.AUTH_METHOD] = auth_method\n req[constants.HOST_AUTH_USER] = host_auth_user\n req[constants.HOST_AUTH_PASSWORD] = host_auth_password\n\n errors = lutils.validate_new_host_details(req)\n\n if not errors:\n try:\n mm = machine_info(host_auth_user, host_id, host_auth_password)\n host_details = futils.connect(mm)\n return make_response(jsonify(host_details), 200)\n except Exception:\n errors.append(\"There was an error connecting to the host\")\n\n if errors:\n v = VayuException(400, \"There were some errors\", errors)\n return make_response(v.to_json(), v.status_code)", "def getHostInfo():", "def set_service_host(self, host):\n self._api_host = f\"https://{host}\"", "def do_host_event(client, args):\n args.type = 'host'\n do_event_show(client, args)", "def host(self, host: str, fields: str = None) -> dict:\n endpoint = f\"/api/host/{host}\" if host else \"/api/host/\"\n ret = self._request(\n endpoint=endpoint,\n params={\"fields\": fields} if fields else {},\n )\n return ret", "def post(self):\n\n try:\n if \"hosts\" in self.jbody:\n hostnames = self.jbody[\"hosts\"]\n else:\n hostnames = [{\"hostname\": self.jbody[\"hostname\"]}]\n except KeyError as err:\n raise exc.BadRequest(\n \"Missing Required Argument: {}\".format(err.message)\n )\n except ValueError as err:\n raise exc.BadRequest(err.message)\n\n log.info(\"HOSTS: Create {}\".format(\", \".join(\n [\n host[\"hostname\"] for host in hostnames\n ]\n )))\n\n try:\n hosts = []\n for hostname in hostnames:\n host = Host.create(self.session, 
hostname[\"hostname\"])\n hosts.append(host.to_dict(self.href_prefix))\n except IntegrityError as err:\n raise exc.Conflict(err.orig.message)\n except exc.ValidationError as err:\n raise exc.BadRequest(err.message)\n\n self.session.commit()\n\n if len(hosts) == 1:\n json = hosts[0]\n self.created(\"/api/v1/hosts/{}\".format(hosts[0][\"hostname\"]), json)\n else:\n self.created(data={\"hosts\": hosts, \"totalHosts\": len(hosts)})\n\n log.info(\"HOST: Created {}\".format(\", \".join(\n [host[\"hostname\"] for host in hostnames]\n )))", "def request(flow):\n flow.count = context.count\n context.count += 1\n context.hosts_list.add(flow.request.host)\n context.locusts.add(flow.request.host, flow)", "def _host_in_event(self, ev):\n self._update_nodes()\n\n if not self.nodes:\n return\n\n for node in self.nodes:\n if node.ip in ev.host.ipv4:\n datapath = self.dpset.get(ev.host.port.dpid)\n node.setPortInformation(ev.host.port.dpid, datapath, ev.host.port.port_no, ev.host.port)\n self._install_cdnengine_matching_flow(datapath, node.ip, node.port)\n self.logger.info('New Node connected the network. Matching rules were installed ' + node.__str__())", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def worker(self, data):\n print(data)\n host_raw = self.connection.do_request('host.get', {\n 'filter': {'host': data[\"ext\"]},\n 'output': ['hostid']\n }).get(\"result\")\n # print(\"host_raw\", host_raw)\n if host_raw:\n host_id = host_raw[0].get(\"hostid\")\n\n else:\n host_new = self.connection.do_request('host.create', {\"host\" : f\"{data.get('ext')}\",\n \"templates\": [\n {\"templateid\" : self.template_id}\n ],\n \"groups\": [\n {\"groupid\": self.group_id}\n ]\n\n })\n\n host_id = host_new.get(\"result\").get(\"hostids\")[0]\n self.send_data(data)", "def _update(self, host):\n pass", "def add_host_to_zone(self, host, zone):\n # Enable host nova service (in case disabled in the past)\n self.enable_host_service(host)\n\n # Verify host is registered.\n if not self.verify_host_exists(host):\n raise NovaNoSuchHost(host)\n # Verify a_z exists\n _agg = self.verify_zone_exists(zone)\n if not _agg:\n raise NovaNoSuchZone(zone)\n if self.get_host_zones(host) != ['nova']:\n for _zone in self.get_host_zones(host):\n self.remove_host_from_zone(host, _zone)\n _agg.add_host(host)\n # make sure success\n if zone in self.get_host_zones(host):\n return True\n else:\n return False", "def add_host(self, hostip, username=None, password=None, fetch_iface=True):\n username = username or self._ep_username\n password = password or self._ep_password\n try:\n prep_node(hostip, username, password)\n if not self.wait_on_host(hostip):\n log.error(\"Could not start service on %s\", hostip)\n if fetch_iface:\n self.add_endpoints(hostip, username, password)\n self.nodes.add(hostip)\n return True\n except Exception as err:\n log.error(\"Error in preparing host %s - %r\", hostip, err)\n return False", "def register(self, target, hostname, listener_type, expire=-1):", "def request_host(request):\n host = urlsplit(request.url).hostname\n if host == \"\":\n host = request.get_header(\"Host\", \"\").partition(\":\")[0]\n\n # remove port, if present\n return host.lower()", "def return_add_transport(hostname, domain_name, 
domain_extension, username):\n #Established the connection\n myconnection = ssh_connection(hostname, username)\n if myconnection == 1:\n return \"Connection to %s failed\" % hostname\n else:\n #We will to test if the domain already exist in the postfix configuration\n commandline=\"/bin/cat /etc/postfix/transport | grep %s | awk '{print $1}' | grep %s\" % (domain_name, domain_extension)\n print commandline\n stdin, stdout, stderr = myconnection.exec_command(commandline)\n if stdout.read():\n #The domain does not exist, exit\n return \"This domain extension (%s) already exist for the domain name %s\" % (domain_extension, domain_name)\n else:\n #Command to send to the host\n commandline=\"echo \\\"%s %s:\\\" >> /etc/postfix/transport\" % (domain_extension, domain_name)\n stdin, stdout, stderr = myconnection.exec_command(commandline)\n if stderr.read():\n is_added=False\n else:\n is_added=True\n\n if is_added == True:\n stdin, stdout, stderr = myconnection.exec_command(\"sudo /usr/sbin/postmap /etc/postfix/transport\")\n #Reload conf postfix\n stdin, stdout, stderr = myconnection.exec_command(\"sudo /etc/init.d/postfix restart\")\n if stderr.read():\n return \"The domain extension has not been added. Failed. The server postfix has not restarted. Please contact system administrator \"\n else:\n return \"This domain extension (%s) has been added for the domain name %s\" % (domain_extension, domain_name)\n else:\n return \"The domain extension has not been added. Failed. Please contact system administrator \"\n\n # Disconnect from the host\n myconnection.close()", "def handle_host(self, host, ignore_hosts=[]):\n LOG.info('MigrateHandler: handle host %s' % host)\n utils.list_instances(host=host)\n response = utils.do_host_migrate(host=host)\n self.handle_response(response)\n utils.list_instances(host=host)", "def create_host_list(self):\n # Get first network address and add to list\n net_address = input('What is a network address you want to ping? ')\n self.hosts.append(net_address)\n\n # Find out if user wants to add more network addresses\n while True:\n add_another = input('Add another? (y/n) ')\n print()\n if add_another.lower() == 'n' or add_another.lower() == 'no':\n break\n elif add_another.lower() == 'y' or add_another.lower() == 'yes':\n net_address = input(\"What is a network address you want to ping? 
\")\n self.hosts.append(net_address)\n else:\n print(\"That is an invalid input.\")\n print()\n os.system('cls')", "def addHostmask(self, hostmask):\n assert ircutils.isUserHostmask(hostmask), 'got %s' % hostmask\n if len(unWildcardHostmask(hostmask)) < 8:\n raise ValueError, \\\n 'Hostmask must contain at least 8 non-wildcard characters.'\n self.hostmasks.add(hostmask)", "def set_one(self, host_name, ip_address):\n self.hosts[host_name] = ip_address", "def setHostRoute( self, ip, intf ):\n return self.cmd( 'route add -host ' + ip + ' dev ' + intf )", "def response_host(self, response_host):\n\n self._response_host = response_host", "def response_host(self, response_host):\n\n self._response_host = response_host", "def update(self, host, values):\n body = dict(host=values)\n return self._update(\"/os-hosts/%s\" % host, body, response_key='host')", "def test_get_host(self):\n pass", "def host_name(self, host_name):\n\n self._host_name = host_name", "def host_name(self, host_name):\n\n self._host_name = host_name", "def add(self, host, plistener):\n\n host_entry = findACL(host)\n if (host_entry.plisteners != None):\n host_entry.plisteners.append(str(plistener))\n else:\n host_entry.plisteners = [str(plistener)]\n host_entry.put()\n\n plistener_entry = findACL(plistener)\n if (plistener_entry.psessions != None):\n plistener_entry.psessions.append(str(host))\n else:\n plistener_entry.psessions = [str(host)]\n plistener_entry.put()", "def getRequestHostname():", "def accept_hosts(request):\n log.debug(\n \"Inside accept_hosts. Request Data: %s\" % request.data)\n if request.method == 'POST':\n data = copy.deepcopy(request.data.copy())\n jobId = tasks.acceptHosts.delay(data)\n log.debug(\"Exiting ... JobID: %s\" % jobId)\n return Response(str(jobId), status=202)\n else:\n return Response({})", "def addHostToJob(d, h, j):\n \n if j not in d:\n d[j] = set()\n d[j].add(h.split(\".\")[0])", "def get_host(req):\n return req.META[\"HTTP_HOST\"].split(\":\")[0]", "def getRemoteHost():", "def add_host(inventory, host_conf):\n host_obj = dict(\n ansible_host=host_conf['FLOAT_IP'],\n ansible_ssh_common_args=\"-o StrictHostKeyChecking=no \"\n \"-o UserKnownHostsFile=/dev/null\",\n ansible_ssh_private_key_file=os.path.expanduser(\n '~/%s.pem' % host_conf['KEYPAIR_NAME']),\n ansible_ssh_user=host_conf['USER_NAME'],\n )\n server_name = host_conf['SERVER_NAME']\n inventory.setdefault('all', dict()).\\\n setdefault('hosts', dict())[server_name] = host_obj\n group_name = host_conf.get('GROUP_NAME', 'ceph-grafana')\n inventory.setdefault(group_name, dict()).\\\n setdefault('hosts', dict())[server_name] = dict()\n return inventory" ]
[ "0.6840379", "0.65409213", "0.65173084", "0.64930916", "0.64499176", "0.6448675", "0.6233369", "0.6211751", "0.6179076", "0.61413676", "0.61369586", "0.6132575", "0.6132081", "0.61151505", "0.60892236", "0.6072281", "0.6072281", "0.6012972", "0.60013103", "0.5987418", "0.5978874", "0.5974824", "0.59652525", "0.59628475", "0.59544355", "0.59446096", "0.5915763", "0.5883168", "0.58741", "0.58488405", "0.579046", "0.57848614", "0.57848614", "0.5766445", "0.5761132", "0.575359", "0.57510775", "0.5749474", "0.5728752", "0.5724837", "0.57004863", "0.5698266", "0.56918633", "0.56807", "0.56805825", "0.5656774", "0.5656139", "0.5637394", "0.563036", "0.5623005", "0.561684", "0.5615322", "0.5606963", "0.5606963", "0.5606963", "0.5606963", "0.5603151", "0.559917", "0.559903", "0.55874765", "0.5568202", "0.5567573", "0.5566727", "0.5543139", "0.5540761", "0.5536924", "0.5534209", "0.5522443", "0.5507686", "0.5502662", "0.54910403", "0.5487778", "0.5479246", "0.54781735", "0.5476912", "0.54686", "0.5463312", "0.54616714", "0.54607326", "0.545766", "0.5453222", "0.54516107", "0.54392076", "0.5434426", "0.5427956", "0.5420618", "0.53933907", "0.5387365", "0.5387365", "0.5383329", "0.53740996", "0.5371423", "0.5371423", "0.53658617", "0.5364314", "0.5353659", "0.5331582", "0.5304921", "0.5295644", "0.528443" ]
0.64364
6
Download log files from remote machines on local machine via ssh
def __download_via_ssh(cls, request, local_path):
        hosts = request.POST.getlist('hosts[]')
        logs = request.POST.getlist('logs[]')
        if not os.path.exists(local_path):
            os.makedirs(local_path)
        for host_name in hosts:
            host_object = Host.objects.get(host_name=host_name)
            host_path = os.path.join(local_path, host_name)
            if not os.path.exists(host_path):
                os.makedirs(host_path)
            for log_name in logs:
                log_object = Log.objects.get(log_name=log_name)
                help_methods.get_file_via_ssh(
                    getattr(log_object, 'log_path'),
                    host_path,
                    getattr(host_object, 'host_name'),
                    getattr(host_object, 'host_root_user'),
                    getattr(host_object, 'host_root_password')
                )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PullLogs(ssh, log_files, download_folder):\n for log_file in log_files:\n target_file = os.path.join(download_folder, os.path.basename(log_file))\n ssh.ScpPullFile(log_file, target_file)\n _DisplayPullResult(download_folder)", "def ssh_download_files(data):\n with _ssh_connect() as ssh:\n with ssh.open_sftp() as sftp:\n with click.progressbar(data, label='downloads') as items: # noqa\n for item in items:\n _, filename = os.path.split(item)\n sftp.get(item, f'{DOWNLOAD_DIR}/{filename}')", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def download(cls, host, remotepath, localpath, user=None):\n # TODO: consider a noclobber option to backup existing files\n\n if not user:\n user = cls.user\n\n # run the command\n ssh = cls._get_ssh_connection(host, user)\n if not ssh:\n print \"ERROR: No ssh connection\"\n return False\n\n # TODO: catch exceptions thrown by SshMachine.download()\n ssh.download(remotepath, localpath)", "def pull_file():\n try:\n global IP_OR_HOST\n paramiko.util.log_to_file(BASE_DIR + '/logs/amazon_os.log')\n config = _get_hvm_config()\n key = paramiko.RSAKey.from_private_key_file(config.get('amazon_hvm').get('private_key_path'))\n transport = paramiko.Transport(IP_OR_HOST, 22)\n transport.connect(username=config.get('amazon_hvm').get('username'), pkey=key)\n sftp = paramiko.SFTPClient.from_transport(transport)\n p = sftp.put(BASE_DIR + '/logs/amazon_os.log', '/etc/test/amazon_os.log')\n # sftp.get('remove_path', 'local_path')\n transport.close()\n except Exception as e:\n transport.close()\n raise e\n else:\n return transport", "def _process_logs_download(self, logfile):\r\n\r\n print 'Downloading PCU logs'\r\n command = 'robot --outputdir \"C:\\Robot Framework\\Output\\PCU_logs\" {}.robot'.format(self.name)\r\n\r\n return self._run_command(command, logfile)", "def download_file(self):\n\n self.monitor.info(f'-> Started to download log file from: {self.url}...')\n try:\n log_file = requests.get(self.url, allow_redirects=True)\n\n postfix = f'{datetime.today().strftime(\"%Y_%m_%d\")}_{str(int(time.time()))}'\n filename = f\"log_file_{postfix}.txt\"\n\n self.monitor.info(f'-> Writing file to {LOG_FILES_PATH}...')\n open(LOG_FILES_PATH + '/' + filename, 'wb').write(log_file.content)\n self.monitor.info(f'-> Finished writing file to {LOG_FILES_PATH}.')\n\n file_path = self.log_file_path + '/' + filename\n\n return file_path, postfix\n\n except requests.exceptions.SSLError as connection_error:\n self.monitor.exception(f'-> Something bad happened. 
Details:\\n {repr(connection_error)}')\n return None, None", "def sync_log(self):\r\n print('Synchronizing log files...')\r\n\r\n # Connect with SSH-PubKey and synchronize files\r\n subprocess.run(\r\n ['scp',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}:/home/robot/.bin/*_tmux.log'.format(self.settings['ip']),\r\n self.log_path\r\n ])\r\n\r\n print('Done.')", "def remote_fetch(ip_addr, username, cmd):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n ssh.connect(ip_addr, username=username)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n return stdout.readlines()", "def Logs():\n # time.sleep(100)\n params = request.get_json()\n hostname = params.get('vaultip', '164.99.91.35')\n password = params.get('boxpass', 'novell')\n username = params.get('boxusername', 'root')\n port = 22\n logType = 'download'\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=hostname, username=username, password=password, port=port)\n\n try:\n print('configure')\n logType= 'configure'\n sftp = client.open_sftp()\n stdin = sftp.open('/var/opt/netiq/idm/log/idmconfigure.log','r')\n # sftp.close();\n except Exception:\n try:\n print('install')\n logType= 'install'\n # sftp = client.open_sftp()\n stdin = sftp.open('/var/opt/netiq/idm/log/idminstall.log','r')\n except Exception:\n #sftp.close()\n print('download')\n logType= 'download'\n try:\n stdin = sftp.open('/tmp/download.log','r')\n except Exception:\n sftp.close()\n return jsonify('no file'),200\n #sftp.close()\n log = stdin.readlines()\n data={'type':logType,'log':log}\n return jsonify(data),200", "def download(self, remotepath, localpath):\n sftp = self.connection.open_sftp()\n if isinstance(remotepath, str):\n sftp.get(remotepath, localpath)\n else:\n for path in remotepath:\n filename = os.path.split(path)[-1]\n sftp.get(path, localpath + \"/\" + filename)\n sftp.close()", "def sftp_download_latest_file(self, host, port, usr, pwd, remote, local=None, **kwargs):\n filefilter = kwargs.get('filter')\n with pysftp.Connection(host, username=usr, password=pwd, port=int(port)) as self.sftp:\n try:\n self.sftp.chdir(remote)\n self._log.debug('sftp walking to %s', remote)\n except (IOError, OSError):\n self._log.debug(\"sftp cd to dir '%s' failed!\", remote)\n\n sftp_curr_dir = self.sftp.getcwd()\n\n statfiles = list(\"%s/%s\" % (sftp_curr_dir, filename) for filename in self.sftp.listdir(sftp_curr_dir) if re.search(filefilter, filename))\n sorted_statfiles = list(sorted([filename for filename in statfiles], key=self.mtime))\n try:\n target_file = sorted_statfiles[-1]\n except (IndexError, NameError):\n self._log.debug(\"'%s' not found!\", filefilter)\n\n if local is None:\n local = os.getcwd()\n if '.' 
not in os.path.basename(local):\n local = os.path.join(local, target_file.split('/')[-1])\n if os.path.exists(os.path.split(local)[0]) is False:\n os.makedirs(os.path.split(local)[0])\n\n self.sftp.get(target_file, local)\n self.sftp.close()", "def download(self, output):\n self.wait()\n path = 'auditlogEntryReport/download'\n with open(output, 'w') as f:\n f.write(self._session.get(path))\n LOGGER.info('log downloaded: {}'.format(output))", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def download(server):\n for i in range(10):\n start_time = time.time()\n logging.debug('Start downloading: %d' % i)\n os.system(\"scp %s:18DOWNLOAD downloads/\" % server)\n end_time = time.time()\n logging.debug('End downloading...')\n logging.debug('Time taken by downloader: %s' % (end_time - start_time))", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):\n logf = ''\n logn = self._logcnt()\n logcodei = \"%put E3969440A681A24088859985\" + logn + \";\"\n logcodeo = \"\\nE3969440A681A24088859985\" + logn\n logcodeb = logcodeo.encode()\n\n valid = self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" does not exist.\"}\n\n if valid == {}:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" is a directory.\"}\n\n if os.path.isdir(localfile):\n locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]\n else:\n locf = localfile\n\n try:\n fd = open(locf, 'wb')\n fd.write(b'write can fail even if open worked, as it turns out')\n fd.close()\n fd = open(locf, 'wb')\n except OSError as e:\n return {'Success' : False, \n 'LOG' : \"File \"+str(locf)+\" could not be opened or written to. Error was: \"+str(e)}\n\n code = \"filename _sp_updn '\"+remotefile+\"' recfm=F encoding=binary lrecl=4096;\"\n\n ll = self.submit(code, \"text\")\n logf = ll['LOG']\n\n self.stdin[0].send(b'tom says EOL=DNLOAD \\n')\n self.stdin[0].send(b'\\n'+logcodei.encode()+b'\\n'+b'tom says EOL='+logcodeb+b'\\n')\n\n done = False\n datar = b''\n bail = False\n\n while not done:\n while True:\n if os.name == 'nt':\n try:\n rc = self.pid.wait(0)\n self.pid = None\n self._sb.SASpid = None\n return {'Success' : False, \n 'LOG' : \"SAS process has terminated unexpectedly. RC from wait was: \"+str(rc)}\n except:\n pass\n else:\n rc = os.waitpid(self.pid, os.WNOHANG)\n if rc[1]:\n self.pid = None\n self._sb.SASpid = None\n return {'Success' : False, \n 'LOG' : \"SAS process has terminated unexpectedly. 
RC from wait was: \"+str(rc)}\n\n if bail:\n if datar.count(logcodeb) >= 1:\n break\n try:\n data = self.stdout[0].recv(4096)\n except (BlockingIOError):\n data = b''\n\n if len(data) > 0:\n datar += data\n if len(datar) > 8300:\n fd.write(datar[:8192])\n datar = datar[8192:]\n else:\n sleep(0.1)\n try:\n log = self.stderr[0].recv(4096).decode(self.sascfg.encoding, errors='replace')\n except (BlockingIOError):\n log = b''\n\n if len(log) > 0:\n logf += log\n if logf.count(logcodeo) >= 1:\n bail = True\n done = True\n\n fd.write(datar.rpartition(logcodeb)[0])\n fd.flush()\n fd.close()\n\n self._log += logf\n final = logf.partition(logcodei)\n z = final[0].rpartition(chr(10))\n prev = '%08d' % (self._log_cnt - 1)\n zz = z[0].rpartition(\"\\nE3969440A681A24088859985\" + prev +'\\n')\n logd = zz[2].replace(\";*\\';*\\\";*/;\", '')\n\n ll = self.submit(\"filename _sp_updn;\", 'text')\n logd += ll['LOG']\n \n return {'Success' : True, \n 'LOG' : logd}", "def get(host, username, remotepath, localpath=None, port=22):\n log = logging.getLogger('device.remotecall')\n log.info('geting file from remote:%s -> %s', remotepath, localpath)\n if not localpath:\n localpath = os.path.split(remotepath)[1]\n cmd = 'scp -P %s %s@%s:%s %s' % (port, username, host, remotepath, localpath)\n try:\n null = open('/dev/null', 'w')\n subprocess.call(shlex.split(cmd), stdin=subprocess.PIPE, stdout=null, stderr=null)\n null.close()\n except Exception as e:\n log.debug('Could not retrieve %s file from %s: Error %s', remotepath, host, e)", "def download_stewicombo_from_remote(name):\n meta = set_stewicombo_meta(name, category='')\n log.info(f'attempting download of {name} from {paths.remote_path}')\n download_from_remote(meta, paths)", "def GetAllLogFilePaths(ssh):\n ssh_cmd = [ssh.GetBaseCmd(constants.SSH_BIN), _FIND_LOG_FILE_CMD]\n log_files = []\n try:\n files_output = utils.CheckOutput(\" \".join(ssh_cmd), shell=True)\n log_files = FilterLogfiles(files_output.splitlines())\n except subprocess.CalledProcessError:\n logger.debug(\"The folder(%s) that running launch_cvd doesn't exist.\",\n constants.REMOTE_LOG_FOLDER)\n return log_files", "def collect_files_from_vm(self, hostname='localhost', username=None, password=None, src=None, dst=None):\n self._scp_get(hostname=hostname, username=username, password=password, srcfile=src, destfile=dst)", "def download_files(file_uris):\n\n if os.path.exists(LOG_FILE):\n log_file = open(LOG_FILE, \"rU+\")\n downloaded_podcasts = strip_newlines(log_file)\n else:\n log_file = open(LOG_FILE,\"w\")\n downloaded_podcasts = []\n\n for uri in file_uris:\n # if the current file URI is not found in the log, it is a new file, and\n # is thus downloaded\n if uri not in downloaded_podcasts:\n # extract filename from the URI \n uri_split = re.split(\"/\", uri)\n filename = uri_split[len(uri_split) - 1]\n \n # download the file\n if OUTPUT:\n print \"downloading \" + uri\n urllib.urlretrieve(uri, DEST_DIR + os.sep + filename)\n log_file.write(uri + os.linesep)\n\n log_file.close()", "def download_cloudtrail_logs(target_dir, bucket, cloudtrail_prefix, org_ids,\n account_ids, regions, from_date, to_date, parallelism):\n prefixes = _s3_key_prefixes(cloudtrail_prefix, org_ids, account_ids, regions, from_date, to_date)\n _s3_download_recursive(bucket, prefixes, target_dir, parallelism)", "def download_all_ftp(download_dir, file_match, ftp_host, ftp_login, \r\n ftp_passwd, ftp_directory, max_wait=60):\r\n if max_wait < 0:\r\n max_wait = 0\r\n \r\n remove_old_ftp_downloads(download_dir)\r\n #open the file 
for writing in binary mode\r\n all_files_downloaded = []\r\n print 'Opening local file'\r\n time_start_connect_attempt = datetime.datetime.utcnow()\r\n request_incomplete = True\r\n ftp_exception = \"FTP Request Incomplete\"\r\n attempt_count = 1\r\n while ((datetime.datetime.utcnow()-time_start_connect_attempt)<datetime.timedelta(minutes=max_wait) \\\r\n or attempt_count == 1) and request_incomplete:\r\n try:\r\n #init FTPClient (moved here because of traffic issues)\r\n ftp_client = PyFTPclient(host=ftp_host,\r\n login=ftp_login,\r\n passwd=ftp_passwd,\r\n directory=ftp_directory)\r\n ftp_client.connect()\r\n file_list = ftp_client.ftp.nlst(file_match)\r\n ftp_client.ftp.quit()\r\n #if there is a file list and the request completed, it is a success\r\n if file_list:\r\n for dst_filename in file_list:\r\n local_path = os.path.join(download_dir, dst_filename)\r\n local_dir = local_path[:-1*len(FileExtension(local_path))-1]\r\n #download and unzip file\r\n try:\r\n #download from ftp site\r\n unzip_file = False\r\n if not os.path.exists(local_path) and not os.path.exists(local_dir):\r\n print \"Downloading from ftp site: \" + dst_filename\r\n unzip_file = ftp_client.download_file(dst_filename, local_path)\r\n else:\r\n print dst_filename + ' already exists. Skipping download ...'\r\n #extract from tar.gz\r\n if unzip_file:\r\n\t\t\t print \"Extracting: \" + dst_filename\r\n ExtractNested(local_path, True)\r\n #add successfully downloaded file to list\r\n all_files_downloaded.append(local_dir)\r\n #request successful when one file downloaded and extracted \r\n request_incomplete = False\r\n else:\r\n print dst_filename + ' already extracted. Skipping extraction ...'\r\n except Exception as ex:\r\n print ex\r\n if os.path.exists(local_path):\r\n os.remove(local_path)\r\n continue\r\n \r\n except Exception as ex:\r\n ftp_exception = ex\r\n pass\r\n \r\n if request_incomplete:\r\n print \"Attempt\", attempt_count, \"failed ...\"\r\n attempt_count += 1\r\n if max_wait > 0:\r\n sleep_time = 5.1\r\n if max_wait < 5.1:\r\n sleep_time = max(max_wait, 0.1)\r\n print \"Sleeping for\", (sleep_time-0.1), \"minutes and trying again ...\"\r\n time.sleep((sleep_time-0.1)*60)\r\n \r\n \r\n \r\n if request_incomplete:\r\n print \"Maximum wait time of\", max_wait, \"minutes exeeded and request still failed. 
Quitting ...\"\r\n raise Exception(ftp_exception)\r\n \r\n print \"All downloads completed!\"\r\n return all_files_downloaded", "def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()", "def download_file(self, remote_file):\n remote_file.download()", "def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def SelectLogFileToPull(ssh, file_name=None):\n log_files = GetAllLogFilePaths(ssh)\n if file_name:\n file_path = os.path.join(constants.REMOTE_LOG_FOLDER, file_name)\n if file_path in log_files:\n return [file_path]\n raise errors.CheckPathError(\"Can't find this log file(%s) from remote \"\n \"instance.\" % file_path)\n\n if len(log_files) == 1:\n return log_files\n\n if len(log_files) > 1:\n print(\"Multiple log files detected, choose any one to proceed:\")\n return utils.GetAnswerFromList(log_files, enable_choose_all=True)\n\n raise errors.CheckPathError(\"Can't find any log file in folder(%s) from \"\n \"remote instance.\" % constants.REMOTE_LOG_FOLDER)", "def cli_copy_pcc_logs(host_ip:str, linux_user:str, linux_password:str)->dict:\n try:\n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp pccserver:/home/logs/ /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd)\n os.makedirs(\"output/pccserver_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/pccserver_logs/ansible.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/pccserver_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/pccserver_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/pccserver_logs/error.log\")\n cmd = \"sudo rm -rf /home/ceph/; sudo docker cp pccserver:/home/jobs/ceph /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/pccserver_logs/ceph\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/ceph/cluster/\",\"output/pccserver_logs/ceph/\")\n \n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp platina-executor:/home/logs/ /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd)\n os.makedirs(\"output/platina_executor_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/platina_executor_logs/ansible.log\")\n 
cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/platina_executor_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/platina_executor_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/platina_executor_logs/error.log\")\n cmd = \"sudo rm -rf /home/kubernetes/; sudo docker cp platina-executor:/home/jobs/kubernetes /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/platina_executor_logs/kubernetes\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/kubernetes/cluster/\",\"output/platina_executor_logs/kubernetes/\")\n \n cmd = \"sudo rm -rf /output/logs\"\n os.system(cmd) \n \n return \"OK\"\n except Exception as e:\n return {\"Error\": str(e)}", "def download_csv_log():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n if Configuration.enable_download:\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n if lh.check_user_enabled_download(user, process):\n content = lh.get_handler_for_process_and_session(process, session).download_csv_log()\n return jsonify({\"content\": content})\n return jsonify({\"content\": \"\"})", "def post_download(self, remote_files):\n pass", "def release_log(self, ssh, action, projectpath):\n git_repo = git.GitRepo(path=projectpath)\n log_entry = (f'{dt.datetime.utcnow().isoformat()} '\n f'[{getpass.getuser()}@{socket.gethostname()}] '\n f'{action.upper()} {self.name} '\n f'[SOURCE: {git_repo.get_branch()} {git_repo.get_tag()}]')\n cmd = f'echo \"{log_entry}\" >> ~/.pproject.log'\n _, stdout, stderr = ssh.exec_command(cmd)\n stdout.channel.recv_exit_status()\n err = stderr.read().strip().decode('ascii')", "def test_retrieve_files_error_message(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmpp/remote_pacha')\n sys.stdout = MockSys()\n sys.exit = MockSys()\n run.retrieve_files()\n actual = sys.stdout.captured()\n expected = \"\"\"\nPacha was not able to retrieve the files from the SSH server provided.\nCheck your configuration file settings and try again.\n\"\"\"\n self.assertEqual(actual, expected)", "def download_file(self, instance, file, where, local):\n\n instance = self.get_instance(instance)\n\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n subprocess.check_output([\"scp\", key, username + \":\" + self.default_path_aws + where + file, local])\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n subprocess.check_output(\n 
[\"scp\", \"-i\", key, username + ':' + self.default_path_aws + where + file, local])\n return \"Success to download file \" + self.default_path_aws + where + file + \" to \" + local\n except:\n return \"Faile to access the instance\"", "def downloadFile(remote_path, fobj):\n logger.msg(\n \"downloading file\", remote_path=remote_path, function='downloadFile'\n )\n\n def file_writer(data):\n fobj.write(data)\n\n remote_path = remote_path.encode('utf-8')\n r = yield treq.get(remote_path, timeout=5)\n try:\n yield treq.collect(r, file_writer)\n except Exception as e:\n print e\n raise", "def remotes():", "def download_images(self, url_file, destination_dir, log_file):\n try:\n self._download_images(url_file, destination_dir, log_file)\n except IOError as error:\n sys.stderr.write(str(error))\n sys.exit(error.errno)\n except Exception as error:\n sys.stderr.write('[Unknown error] %s' % str(error))\n sys.exit(1)", "def __download(self, downloadType):\n\n downloadAudit = {}\n downloadAudit['download_type'] = downloadType\n downloadAudit['remote_url'] = self.remoteUrl#get the url that is being requested\n downloadAudit['download_start_time'] = datetime.now()#capture the date when the url was accessed\n #first make an entry into the db stating that a download operation is to be attempted\n downloadAudit['comment'] = 'Starting download operation'\n newRecordId = self.__auditFileRequest(downloadAudit)\n\n downloadAudit = {}\n try:\n req = urllib2.Request(self.remoteUrl)\n r = urllib2.urlopen(req)\n except URLError, urle:\n if hasattr(urle, 'reason'):\n downloadAudit['comment'] = urle.reason\n else:\n downloadAudit['comment'] = urle.__str__()\n \n self.__auditFileRequest(downloadAudit, newRecordId)\n \n return None #just return since there has been an error in connecting with the remote server\n\n try:\n downloadAudit['local_file_path'] = '/' + self.localRepoDir + '/' + downloadType + '/' + downloadType + '-' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '.xls'\n\n file = open(settings.APPLICATION_SETTINGS['MCH_HOME'] + downloadAudit['local_file_path'], 'wb')\n file.write(r.read())\n\n #headerInfo = r.info()\n\n isFileToBeProcessed = False #default is not to process file\n\n #before downloading, check to see if the remote file is more recent than the last file that was downloaded, whose\n #information is in the db\n latestFetch = self.__recentDownload(downloadType)\n\n if latestFetch:\n downloadAudit['file_size'] = latestFetch[0]\n file.flush()#make sure all the content is written to file \n os.fsync(file.fileno())\n if latestFetch[0] != os.path.getsize(file.name):\n isFileToBeProcessed = True\n else:\n isFileToBeProcessed = True\n\n if isFileToBeProcessed:\n downloadAudit['file_size'] = os.path.getsize(file.name)\n downloadAudit['file_modification_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n downloadAudit['download_completion_time'] = datetime.now()\n #now make an audit entry into the database\n downloadAudit['comment'] = 'New file to be processed.'\n else:\n os.remove(file.name)#remove the file since it looks like it has already been downloaded and processed\n #now make an audit entry into the database\n downloadAudit['comment'] = 'File already downloaded, purged it from the file system.'\n\n self.__auditFileRequest(downloadAudit, newRecordId)\n file.close()\n except Exception as e:\n self.logger.exception('\\n Unknown fatal error occured during the downloading of the raw files. 
%s' % str(e))\n\n return newRecordId", "def get_logs_from_ftp(self, date, server_id, folder):\n\n node = handler.cloudServerHelper.util.get_node(ServerList[server_id][sC.INSTANCE_NAME])\n ip = handler.cloudServerHelper.util.ip(node)\n\n request = {\n 'score_starting_': self.score_starting_,\n 'logs_starting_': self.logs_starting_,\n 'results_': self.results_,\n 'amxmodx_logs_': self.amxmodx_logs_,\n 'cstrike_logs_': self.cstrike_logs_,\n 'folder': folder,\n 'ip': ip,\n 'username': ServerList[server_id][sC.USERNAME],\n 'password': ServerList[server_id][sC.PASSWORD],\n 'date': date.timestamp(),\n }\n\n data_json = json.dumps(request)\n r = requests.post(self.ftp_logs_func_url, json=data_json)\n\n if r.status_code != 200:\n raise server_error", "def PullFileFromInstance(cfg, instance, file_name=None, no_prompts=False):\n ssh = Ssh(ip=IP(ip=instance.ip),\n user=constants.GCE_USER,\n ssh_private_key_path=cfg.ssh_private_key_path,\n extra_args_ssh_tunnel=cfg.extra_args_ssh_tunnel)\n log_files = SelectLogFileToPull(ssh, file_name)\n download_folder = GetDownloadLogFolder(instance.name)\n PullLogs(ssh, log_files, download_folder)\n if len(log_files) == 1:\n DisplayLog(ssh, log_files[0], no_prompts)\n return report.Report(command=\"pull\")", "def download_workflow_log_files(repo, github_token, workflow_run_id, data_root_dir):\n headers = {\n 'Accept': 'application/vnd.github.v3+json',\n }\n query_url = f\"https://api.github.com/repos/{repo.owner.login}/{repo.name}/actions/runs/{workflow_run_id}/logs\"\n response = requests.get(query_url, headers=headers,\n auth=('username', github_token))\n if 'zip' in response.headers['Content-Type']:\n zip_obj = zipfile.ZipFile(io.BytesIO(response.content))\n data_dir = Path(data_root_dir, Workflows.WORKFLOWS_DIR, str(workflow_run_id))\n zip_obj.extractall(data_dir)\n return len(zip_obj.namelist())\n else:\n return None", "def processData(self):\n _lastLine = True\n _firstRead = True\n n = 0\n while self.console.working:\n remotelog = ''\n response = ''\n remote_log_data = ''\n remotelogsize = 0\n\n # specify range depending on if the last line\n # is in the remote log chunk or not\n if _lastLine:\n b = 'bytes=-10000'\n else:\n b = 'bytes=-100000'\n\n headers = {\n 'User-Agent' : user_agent,\n 'Range' : b,\n 'Accept-encoding' : 'gzip'\n }\n\n request = urllib2.Request(self._url, None, headers)\n\n # get remote log url response and headers\n try:\n response = urllib2.urlopen(request)\n headers = response.info()\n\n # buffer/download remote log\n if response != '':\n remote_log_data = response.read()\n remotelogsize = round((len(remote_log_data) / float(1024)), 2)\n # self.verbose('Downloaded: %s KB total' % remotelogsize)\n\n try:\n # close remote file\n response.close()\n except AttributeError, e:\n self.error('ERROR: %s' % e)\n\n except (urllib2.HTTPError, urllib2.URLError), e:\n self.error('HTTP ERROR: %s' % e)\n except socket.timeout:\n self.error('TIMEOUT ERROR: socket timed out!')\n\n # start keeping the time\n start = time.time()\n\n # decompress remote log and return for use\n # First, make sure that there is domething worth decompressing\n # In case the server has just done a restart\n if len(remote_log_data) > 0:\n try:\n #self.debug('Content-Encoding: %s' % headers.get('Content-Encoding'))\n if headers.get('Content-Encoding') == 'gzip':\n compressedstream = StringIO.StringIO(remote_log_data)\n gzipper = gzip.GzipFile(fileobj=compressedstream)\n remotelog = gzipper.read()\n else:\n remotelog = remote_log_data\n except IOError, e:\n remotelog = ''\n 
self.error('IOERROR: %s' % e)\n\n if os.path.exists(self.locallog) and os.path.getsize(self.locallog) > 0 and not _firstRead:\n\n # check if last line is in the remote log chunk\n if remotelog.find(self.lastlines) != -1:\n _lastLine = True\n n = 0\n\n # we'll get the new lines i.e what is available after the last line\n # of our local log file\n try:\n checklog = remotelog.rpartition(self.lastlines)\n newlog = checklog[2]\n # remove any broken last line\n i = newlog.rfind ('\\r\\n')\n newlog = newlog[:i + 2]\n # remove any blank lines\n while newlog[-4:-2] == '\\r\\n':\n newlog = newlog[:-2]\n except ValueError, error:\n self.error ('ValueError: %s' % error)\n newlog = ''\n\n # remove any blank lines from end\n # append the additions to our log if there is something and update lazy cursor\n if len(newlog) > 0:\n output = open(self.locallog,'ab')\n output.write(newlog)\n output.close()\n self.lastlines = remotelog[-1000:] \n self.debug('downloaded %s KB and added %s char(s) to log' % (remotelogsize, len(newlog)))\n\n else:\n _lastLine = False\n self.debug('can\\'t find last line in the log chunk: checking again...')\n n += 1\n\n # check once in a larger chunk and if we are still unable to find last line\n # in the remote chunk, restart the process\n if n == 2:\n self.debug('Logs rotated or unable to find last line in remote log, restarting process...')\n self.writeCompletelog(self.locallog, remotelog)\n _lastLine = True\n n = 0\n\n else:\n self.debug('writing first log read')\n self.writeCompletelog(self.locallog, remotelog)\n _firstRead = False\n\n # calculate how long it took to process\n timespent = time.time() - start\n\n # calculate time to wait until next request.\n timeout = float(self.timeout)\n\n # self.verbose('Given timeout value is %s seconds' % timeout)\n # self.verbose('Total time spent to process the downloaded file is %s seconds' % timespent)\n\n # Calculate sleep time for next request. 
Adding 0.1 secs to prevent HTTP Error 403 errors\n wait = float((timeout - timespent) + 0.1)\n\n if wait <= 0:\n wait = 1\n\n # make the plugin thread fast-killable\n i = 0\n w = int(wait)\n while i < w and self.console.working:\n time.sleep(1)\n i += 1\n time.sleep(wait - w)\n\n self.verbose('B3 is down: stopping Cod7Http plugin')", "def download_files(self):", "def download_from_ftp_to_local_SCP(host,port,ftp_path, local_file, mode = 'bin', user = 'root', password = 'root'):\n try:\n scp_obj = SCP(host, port, user, password)\n scp_obj.connect()\n except Exception:\n scp_obj = SCP(host, port, user, password)\n\n if os.path.isdir(local_file):\n print('not support now!')\n else:\n scp_obj.download(local_file, ftp_path, mode)\n scp_obj.close()", "def cmd_logs(args):\n\n remote.show_log(_get_current_project_name(), num=args.num, tail=args.tail)", "def ssh_get(dst = \"../data/\", src = \"/home/hs/date/calve_data.json\", port = 22, hostname = \"168.61.55.8\", username = \"cloud\", password = \"cloud12345678!\"):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.load_system_host_keys()\n ssh.connect(hostname=hostname, port=port, username=username, password=password,\n pkey=None, key_filename=None, timeout=None, allow_agent=True,\n look_for_keys=True, compress=False)\n\n with SCPClient(ssh.get_transport(), sanitize=lambda x: x) as scp:\n scp.get(remote_path=src, local_path=dst)\n\n scp.close()", "def download_cluster(self, remotepath, localpath, merge=False):\n cget = \"getmerge\" if merge else \"get\"\n if isinstance(remotepath, str):\n filename = os.path.split(localpath)[-1]\n self.execute_command(\n \"hdfs dfs -{2} {0} {1}\".format(remotepath, filename, cget))\n self.download(filename, localpath)\n self.execute_command(\"rm {0}\".format(filename))\n else:\n tod = []\n for afile in remotepath:\n filename = os.path.split(afile)[-1]\n self.execute_command(\n \"hdfs dfs -{2} {0} {1}\".format(afile, filename, cget))\n tod.append(filename)\n self.download(tod, localpath)\n for afile in tod:\n self.execute_command(\"rm {0}\".format(afile))\n\n return remotepath", "def execute_remote(self, project):\n\n\t\tAppLogger.info(\"Downloading \\\"\" + self.__from + \"\\\" to \\\"\" + self.__to + \"\\\" on server \" + self.destination)\n\t\treturn True", "def files(self):\n log.info(\"starting file iteration\")\n ssh = paramiko.SSHClient()\n\n if self.load_system_host_keys:\n log.debug('loading system host keys')\n ssh.load_system_host_keys()\n if self.host_key_auto_add:\n log.debug('setting host key policy to auto add')\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n sshconf = paramiko.SSHConfig()\n # paramiko uses 'USER' environment var to parsing %u, %r\n # when nrpe daemon run the check, that var is not set and results in\n # 'None' user, set it before parsing config file.\n local_user = pwd.getpwuid(os.getuid()).pw_name\n os.environ['USER'] = os.environ.get('USER', local_user)\n with open('/etc/ssh/ssh_config') as f:\n sshconf.parse(f)\n\n # paramiko wrongly parses %u/%r@%h as it use same value for %u and %r\n # replace %r with the configured username\n self.kwargs['key_filename'] = [\n path for path in sshconf.lookup(self.hostname)['identityfile']\n ]\n\n log.info(\"connecting to %s\", self.hostname)\n log.debug(\"kwargs: %s\", str(self.kwargs))\n for key_file in self.kwargs['key_filename'][:]:\n try:\n ssh.connect(**self.kwargs)\n break\n except IOError as e:\n log.info(\"Key %s does not exist, trying another\", key_file)\n try:\n 
self.kwargs['key_filename'].pop(0)\n except IndexError:\n raise Exception('No more ssh private key to try.'\n 'Make sure good ssh key exist.')\n log.debug(\"opening sftp\")\n ftp = ssh.open_sftp()\n log.debug(\"chdir %s\", self.pwd)\n try:\n ftp.chdir(self.pwd)\n except IOError, e:\n log.error(\"Error going to directory %s: %s\", self.pwd, e)\n return\n\n # optimization. To avoid running fstat for every backup file, I filter\n # out to only test the newest backup for each facility\n files = {}\n log.debug(\"running ls\")\n for fattr in ftp.listdir_attr():\n # a longname looks like:\n # -rw-r--r-- 1 radvd quagga 5586928 Jun 22 06:35\n # postgresql-roundcube-2016-06-22-06_34_47.sql.xz\n if fattr.longname.startswith('d'): # is a directory\n log.debug(\"Skipping directory %s\", fattr.longname)\n continue\n filename = fattr.longname.split()[-1]\n log.debug('processing %s', filename)\n\n f = self.make_file(filename, None)\n if not f:\n log.debug('skipping')\n continue\n key, value = f.items()[0]\n # we may want to run fstat on this filename later on\n f[key]['filename'] = filename\n # keeps only the newest file for each facility\n if (key not in files) or (value['date'] > files[key]['date']):\n log.debug('first or newer.')\n files.update(f)\n else:\n log.debug('was old')\n\n # now fetch fstat for each file, and yield them\n for k, f in files.items():\n log.debug('getting fstat for %s', f['filename'])\n filestat = ftp.stat(f['filename'])\n f['size'] = filestat.st_size\n yield {k: f}", "def pre_download(self, remote_files):\n pass", "def merge_hosts(dp_shell_history: Path, year: int, month: int) -> Path:\n all_hostnames = [\n d\n for d in os.listdir(dp_shell_history)\n if os.path.isdir(f\"{dp_shell_history}/{d}\")\n ]\n possible_log_files = [\n f\"{dp_shell_history}/{H}/{year}/{str(month).zfill(2)}.log\"\n for H in all_hostnames\n ]\n all_log_files = list(filter(os.path.exists, possible_log_files))\n\n if all_log_files == []:\n raise LogsNotFound()\n\n fp_log = Path(f\"/tmp/{SCRIPTNAME}/{year}/{str(month).zfill(2)}.log\")\n\n fp_dir = fp_log.parent\n if not os.path.exists(fp_dir):\n os.makedirs(fp_dir)\n\n gutils.shell(f\"cat {' '.join(all_log_files)} | sort -t: -k 3n > {fp_log}\")\n\n # Protects against possible race condition\n for i in [0.5, 1, 2]:\n if not os.path.exists(fp_log):\n time.sleep(i)\n else:\n break\n else:\n raise RuntimeError(\n f\"Something went wrong. 
The {fp_log} file does not exist.\"\n )\n\n return fp_log", "def connect_server_backend(servername, ssh_key_file, localpath, remotepath, user, log_full_filename, log_file_path,\n verbose_log_full_filename):\n # crea la directory di salvataggio\n command = \"mkdir -p {localpath}\".format(localpath=localpath)\n os.system(command)\n # imposta il comando di rsync\n command = 'rsync --archive --no-o --no-g -vv -r -K -e \\\"/usr/bin/ssh -i {ssh_key_file}\\\" {user}@{servername}:{remotepath} {localpath} > {log_file_path}logs/log_file_verbose_{servername}_{remotepathclean}_{now}.txt'.format(\n ssh_key_file=ssh_key_file, user=user, servername=servername, localpath=localpath, remotepath=remotepath,\n remotepathclean=remotepath.replace(\"/\", \"_\"), log_file_path=log_file_path,\n now=datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\n # append_log_info(command, verbose_log_full_filename, log_file_path)\n append_log_info(\"---------------------------------------------\", verbose_log_full_filename, log_file_path)\n datetime_prefix = datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n log_string = datetime_prefix + \" start transfer from \" + servername + remotepath\n append_log_info(command, verbose_log_full_filename, log_file_path)\n append_log_info(log_string, log_full_filename, log_file_path)\n os.system(command)\n # esegue il comando di controllo dell'occupazione del disco\n datetime_prefix = datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n log_string = datetime_prefix + \" stop transfer from \" + servername + remotepath\n # append_log_info(command, verbose_log_full_filename, log_file_path)\n append_log_info(log_string, log_full_filename, log_file_path)\n command = 'df -lh | grep md0 >> \\\"{log_file}\\\"'.format(log_file=verbose_log_full_filename)\n os.system(command)\n append_log_info(\"---------------------------------------------\", verbose_log_full_filename, log_file_path)", "def ssh(cmds, bastion_ip, host, username, pem_key):\n cmd = \"ssh -i %s %s@%s\" % (pem_key, username, host)\n if bastion_ip:\n cmd = \"ssh -F ssh_config-metrics %s\" % (host)\n parts = cmd.split(' ')\n parts.append(';'.join(cmds))\n CONSOLE.debug(json.dumps(parts))\n ret_val = subprocess_to_log.call(parts, LOG, host, scan_for_errors=[\n r'lost connection', r'\\s*Failed:\\s*[1-9].*'])\n if ret_val != 0:\n raise Exception(\"Error running ssh commands on host %s. 
See debug log (%s) for details.\" % (\n host, LOG_FILE_NAME))", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def find_remote_files(remote_path, type, ssh):\n (ssh_in, ssh_out, ssh_err) = ssh.exec_command(\"find %s -name \\\"*\\\" -type %s\" % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def remote_stream(**kwargs):\n # SSH into the Raspberry Pi\n logging.info(\"Conneting to the Raspberry Pi\")\n ssh_client = connect_to_client()\n sftp_client = ssh_client.open_sftp()\n\n if not os.path.exists(kwargs[\"destination_filepath\"]):\n os.makedirs(kwargs[\"destination_filepath\"])\n\n # Create arguments for the command \n destination_filepath = kwargs[\"destination_filepath\"]\n video_length = kwargs[\"video_length\"]\n preprocess_analysis = kwargs[\"preprocess_analysis\"]\n\n # Launch script to check for data in the output\n cmd = f\"python3 check_local_files.py {destination_filepath}\"\n for roi in kwargs[\"roi_locations\"]:\n cmd += f\" {roi}\"\n if kwargs[\"database\"]:\n cmd += \" --database\"\n cmd += f\" --preprocess_analysis {preprocess_analysis}\"\n proc = subprocess.Popen(cmd, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)\n\n # Copy data from the Raspberry Pi to the local machine\n while True:\n current_datetime = datetime.now(TIMEZONE)\n batch_id = current_datetime.strftime(\"%Y%m%d%H%M%S\")\n pi_data_dir = os.path.join(\"/home/pi/Desktop\", current_datetime.strftime(\"%Y_%m_%d\"))\n local_filepath = os.path.join(kwargs[\"destination_filepath\"], batch_id + \".h264\")\n\n # Make the data directory if it doesn't already exist\n try:\n sftp_client.stat(pi_data_dir)\n except IOError:\n sftp_client.mkdir(pi_data_dir)\n\n pi_filepath = os.path.join(pi_data_dir, batch_id + \".h264\")\n cmd = f\"source /home/pi/.bash_profile; source /home/pi/envs/pulse_tracer/bin/activate; python3 /home/pi/Desktop/capstone/software/data_stream/rpi_camera_collect.py {pi_filepath} --video_length {video_length}\"\n \n logging.info(f\"Starting data collection on Raspberry Pi for {video_length} seconds\")\n stdin, stdout, stderr = ssh_client.exec_command(cmd)\n exit_status = stdout.channel.recv_exit_status() \n\n # If the remote facial detection was successful, read the output directory\n if exit_status == 0:\n # Transfer data from Raspberry Pi to local\n logging.info(\"Copying remote file to local machine\")\n sftp_client.get(pi_filepath, local_filepath + \".partial\")\n os.rename(local_filepath + \".partial\", local_filepath)\n else:\n logging.error(stderr.read())\n raise Exception()\n \n sftp_client.close()\n ssh_client.close()", "def scp(files, bastion_ip, host, username, pem_key):\n cmd = \"scp -i %s %s %s@%s:%s\" % (pem_key,\n ' '.join(files), username, host, '/tmp')\n if bastion_ip:\n cmd = \"scp -F ssh_config-metrics %s %s:%s\" % (\n ' '.join(files), host, '/tmp')\n CONSOLE.debug(cmd)\n ret_val = subprocess_to_log.call(cmd.split(' '), LOG, host)\n if ret_val != 0:\n raise Exception('''Error transferring files to new host %s via SCP.\n See debug log (%s) for details.''' % (host, LOG_FILE_NAME))", "def 
download_all_files(self):\n self.server_handler.get_sensor_data_from_server()", "def get_upload_source_log(upload_id: int, user: auth_domain.User) -> Response:\n user_string = util.format_user_information_for_logging(user)\n logger.info(\"%s: Download source log [%s].\", upload_id, user_string)\n try:\n workspace: Workspace = database.retrieve(upload_id)\n except IOError:\n logger.error(\"%s: GetSourceLog: There was a problem connecting to\"\n \" database.\", upload_id)\n raise InternalServerError(messages.UPLOAD_DB_CONNECT_ERROR)\n\n except database.WorkspaceNotFound as nf:\n logger.info(\"%s: Workspace not found: '%s'\", upload_id, nf)\n raise NotFound(messages.UPLOAD_NOT_FOUND) from nf\n\n\n filepointer = workspace.log.open_pointer('rb')\n headers = {\n \"Content-disposition\": f\"filename={workspace.log.name}\",\n 'ETag': workspace.log.checksum,\n 'Content-Length': workspace.log.size_bytes,\n 'Last-Modified': workspace.log.last_modified,\n 'ARXIV-OWNER': workspace.owner_user_id\n }\n return filepointer, status.OK, headers", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download(self):\n cmd = mccli() + \" d f \" + self.localpath + \" -p \" + self.project.name\n \n set_cli_remote(self.project.remote)\n \n child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = child.communicate()\n return CLIResult(out, err, child.returncode)", "async def logs(self, ctx):\r\n openfile = open(\"logs.csv\", \"rb\")\r\n logfile = discord.File(fp=openfile, filename='logs')\r\n await ctx.send(file=logfile)", "def downloadToRemoteFileList(self):\n self.remoteWordList = []\n self.remoteDir = {}\n self.ftp.dir('.', self.addItemToRemoteFileList)\n # self.Remote_completerModel.setStringList(self.remoteWordList)", "def download(self, task_id_list=None,\n max_speed=None, print_log=True,\n download_filename_format=\"true\",\n local_path=None, server_path=None,\n engine_type=\"aspera\", server_ip=None, server_port=None,\n network_mode=0):\n self.check_params(task_id_list, server_path)\n local_path = self._check_local_path(local_path)\n self.logger.info(\"[Rayvision_sync start download .....]\")\n self._download_log(task_id_list, local_path)\n\n self._run_download(task_id_list, local_path, max_speed, print_log,\n download_filename_format, server_path,\n engine_type=engine_type, server_ip=server_ip, server_port=server_port,\n network_mode=network_mode)\n self.logger.info(\"[Rayvision_sync end download.....]\")\n return True", "def getArchLogs(self):\n\n # Implement checkFiles() for archs?\n\n # Pull log file\n if self.nbDetails['proc']['archLog'] is not None:\n result = self.c.get(self.nbDetails['proc']['archLog'])\n print(f\"Pulled archive creation log {result.remote} to {result.local}\")\n else:\n print(f\"Archives not yet written.\")", "def download_urls(urls_filename, reverse=True, log_filename='youtube-playlist-download.log'):\n urls_file = open(urls_filename)\n url_lines = urls_file.read().splitlines();\n urls_file.close()\n if reverse:\n url_lines = reversed(url_lines)\n\n logfile = open(log_filename, 'w')\n logfile.write('\\n' + str(datetime.now()) + '\\n')\n logfile.flush()\n\n # use -f best to avoid merging and just get the best overall format (saves time)\n youtube_cmd_with_args = ['youtube-dl', '--ignore-errors', '--ignore-config', '--write-info-json', '--no-mtime', '-f best', '-o ' + get_full_filename()]\n\n try:\n for line in 
url_lines:\n url_id, title = line.split('\\t')[:2]\n print('Downloading video: \"' + title + '\" with id \"' + url_id + '\"')\n run(youtube_cmd_with_args + [YT_PREFIX + url_id])\n print('Done downloading url:', url_id)\n notify('Done downloading url:' + url_id)\n logfile.write('Downloaded\\t' + url_id + '\\t' + title + '\\n')\n logfile.flush()\n except KeyboardInterrupt as e:\n print(\"Exiting\")\n logfile.close()\n\n logfile.close()", "def retrieveLogs(self, execution, localLogDestination):\n # TODO: Implement this in order to get your logs out. The parent implementation will take care of cpu.log in case\n # profiling was requested. Example:\n #\n # execution.host.getFile( '{0}/log.log'.format( self.getExecutionLogDir( execution ) ),\n # os.path.join( localLogDestination, 'log.log' ), reuseConnection = execution.getRunnerConnection() )\n # client.retrieveLogs(self, execution, localLogDestination)\n #\n # The use of the execution.getRunnerConnection() connection prevents errors with multi-threading.\n #\n # This assumes you have no logs of your own:\n client.retrieveLogs(self, execution, localLogDestination)", "def do_downloads_by_server(self, arg):\n args = arg.split()\n if len(args) != 0:\n common.error('`downloads_by_server` doesn\\'t'\n 'expect any arguments.')\n else:\n servers = self.central_server.download_service.get_servers()\n requests = self.central_server.client_service.get_requests()\n\n if not servers.is_empty():\n print(\"%i server(s):\" % len(\n servers.servers))\n print('')\n for server in servers.servers:\n print('server:')\n print(str(server))\n print('downloads:')\n server_requests = requests.get_requests_from_server(server)\n if server_requests:\n print(tabulate(\n [request.to_row() for request\n in server_requests],\n headers=['Movie', 'Client'],\n tablefmt=\"psql\"))\n print('')\n else:\n print('No downloads\\n')\n else:\n print('There\\'s no available servers')", "def get_url(self,url,output=None):\n parsed_url = urlparse(url)\n hostname = parsed_url[1]\n \n #Make the command\n cmd = \"wget %s -O -\" % url\n (ssh_input,ssh_output,ssh_err) = self.execute_command(cmd)\n \n if(output==None):\n p = urlparse(url)[2]\n filename = os.path.split(p)[1] \n output = filename\n # See if it's ok.\n err = sio.StringIO()\n dat = ssh_err.read(BLOCKSIZE)\n while(dat):\n err.write(dat)\n dat = ssh_err.read(BLOCKSIZE)\n \n err_out = err.getvalue()\n print >> sys.stderr, err_out\n err1 = re.compile(r\"failed\") # Failed to resolve hostname\n err2 = re.compile(r\"404 Not Found\") # File not found\n \n if(err1.search(err_out)):\n raise SSHError(\"ERROR: Failed to retrieve file! Hostname unknown\")\n elif(err2.search(err_out)):\n raise SSHError(\"ERROR: Failed to retrieve file. 
File not found\")\n # If it didn't fail, read the file.\n \n if(output==\"-\"):\n f = sys.stdout\n else:\n f = open(output,\"w+b\")\n dat = ssh_output.read(BLOCKSIZE)\n while(dat):\n f.write(dat)\n dat = ssh_output.read(BLOCKSIZE)", "def downloader(\n save_dir,\n zip_downloads=None,\n tar_downloads=None,\n file_downloads=None,\n info_message=None,\n force_overwrite=False,\n cleanup=False,\n):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n if zip_downloads is not None:\n for zip_download in zip_downloads:\n download_zip_file(zip_download, save_dir, force_overwrite, cleanup)\n\n if tar_downloads is not None:\n for tar_download in tar_downloads:\n download_tar_file(tar_download, save_dir, force_overwrite, cleanup)\n\n if file_downloads is not None:\n for file_download in file_downloads:\n download_from_remote(file_download, save_dir, force_overwrite)\n\n if info_message is not None:\n print(info_message)", "def download_from_remote(remote, save_dir, force_overwrite=False):\n if remote.destination_dir is None:\n download_dir = save_dir\n else:\n download_dir = os.path.join(save_dir, remote.destination_dir)\n\n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n\n download_path = os.path.join(download_dir, remote.filename)\n if not os.path.exists(download_path) or force_overwrite:\n # If file doesn't exist or we want to overwrite, download it\n with DownloadProgressBar(\n unit='B', unit_scale=True, unit_divisor=1024, miniters=1\n ) as t:\n try:\n urllib.request.urlretrieve(\n remote.url,\n filename=download_path,\n reporthook=t.update_to,\n data=None,\n )\n except Exception as e:\n error_msg = \"\"\"\n mirdata failed to download the dataset!\n Please try again in a few minutes.\n If this error persists, please raise an issue at\n https://github.com/mir-dataset-loaders/mirdata,\n and tag it with 'broken-link'.\n \"\"\"\n print(error_msg)\n raise e\n\n checksum = md5(download_path)\n if remote.checksum != checksum:\n raise IOError(\n '{} has an MD5 checksum ({}) '\n 'differing from expected ({}), '\n 'file may be corrupted.'.format(download_path, checksum, remote.checksum)\n )\n return download_path", "def _forward_log(self):\n\n if self.log is None:\n return\n\n fd = None\n try:\n fd = os.open(\"%s.out\" % self.vm_log_path, os.O_RDONLY)\n data = \"\"\n while True:\n new_data = os.read(fd, 4096)\n if new_data == \"\":\n self._log_to_file(data)\n return\n\n data += new_data\n lines = data.split(\"\\n\")\n for line in lines[:-1]:\n self._log_to_file(line)\n data = lines[-1]\n\n finally:\n if fd is not None:\n os.close(fd)", "def get_files(self, step):\n dht = get_remote_node(self.dht_ip, self.dht_port)\n files = dht.get(get_hash(filestep + \"|\" + str(step)))\n return files", "def download_file(remote_file, local_file=None, key_filename=None, hostname=None, username=None) -> None:\n if local_file is None: # pragma: no cover\n local_file = remote_file\n with get_connection(\n hostname=hostname, username=username, key_filename=key_filename\n ) as connection: # pragma: no cover\n try:\n sftp = connection.open_sftp()\n sftp.get(remote_file, local_file)\n finally:\n sftp.close()", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"git@github.com:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def download_runlog(runlog_id, app_name, file_name):\n\n client = get_api_client()\n app = _get_app(client, app_name)\n app_id = app[\"metadata\"][\"uuid\"]\n\n if not file_name:\n file_name = 
\"runlog_{}.zip\".format(runlog_id)\n\n res, err = client.application.download_runlog(app_id, runlog_id)\n if not err:\n with open(file_name, \"wb\") as fw:\n fw.write(res.content)\n click.echo(\"Runlogs saved as {}\".format(highlight_text(file_name)))\n else:\n LOG.error(\"[{}] - {}\".format(err[\"code\"], err[\"error\"]))", "def copy_file_to_server():\r\n utils.system_output('mv /home/chronos/user/Downloads/* /usr/local/autotest/results/default/',ignore_status=True)\r\n logging.info(\"Video Copied to Log location\")", "def download(conn, remotepath, localpath, filter = None, ignore_invalid = False, chunk_size = 16000):\n if conn.modules.os.path.isdir(remotepath):\n download_dir(conn, remotepath, localpath, filter)\n elif conn.modules.os.path.isfile(remotepath):\n download_file(conn, remotepath, localpath, chunk_size)\n else:\n if not ignore_invalid:\n raise ValueError(\"cannot download %r\" % (remotepath,))", "async def download_files(self, download_path):\n\n async with vt.Client(self.apikey) as client:\n while True:\n file_hash = await self.queue.get()\n file_path = os.path.join(download_path, file_hash)\n with open(file_path, \"wb\") as f:\n await client.download_file_async(file_hash, f)\n self.queue.task_done()", "def put_data(file_name):\n # Create file descriptor for table file \n loc_file = open(file_name + '.csv', 'rw')\n\n # Create file descriptor for log file \n log_file2 = open('logfile.txt', 'r')\n\n output = \"files opened\\n\"\n print(output)\n os.write(log_file, output)\n\n # Holds private key used to connect to server\n key = paramiko.RSAKey.from_private_key_file('keyp1.pem')\n\n # Try establishing connection to server\n try:\n # Holds Transport socket\n t = paramiko.Transport(('aws.cwardcode.com', 22))\n # Attempt to connect with key\n t.connect(username='ubuntu', pkey=key)\n # Create sftp session\n c = paramiko.SFTPClient.from_transport(t)\n\n except socket.gaierror as e:\n # getAddrInfo error occured, so let's collect data and exit\n output = \"GetAddressInfo error occurred: \" + str(e)\n print(output)\n os.write(log_file, output)\n emergency_put()\n sys.exit(2)\n\n except socket.error as e:\n # Socket error occured, so let's collect data and exit\n output = \"Connection error occurred: \" + str(e)\n print(output)\n os.write(log_file, output)\n emergency_put()\n sys.exit(3)\n\n # Change into where data is stored\n c.chdir('updata')\n\n output = \"changed to updata\\n\"\n print(output)\n os.write(log_file, output)\n\n # Get remote file, and set mode to append\n rem_file = c.file(file_name + '.csv', mode='a', bufsize=1)\n\n output = \"opened tablefile\\n\"\n print(output)\n os.write(log_file, output)\n #Get remote log file and set mode to overwrite\n rem_log_file = c.file('logfile.txt', mode='w', bufsize=1)\n output = \"opened logfd\\n\"\n print(output)\n os.write(log_file, output)\n\n # Write data and clean up\n rem_file.write(loc_file.read())\n rem_log_file.write(log_file2.read())\n output = \"Wrote files to server\\n\"\n\n #Flush/close data file streams\n os.write(log_file, output)\n rem_file.flush()\n rem_file.close()\n loc_file.close()\n\n #Flush/close log file streams\n rem_log_file.flush()\n rem_log_file.close()\n rem_log_file.close()\n\n output = \"File streams closed\\n\"\n os.write(log_file, output)\n # Remove file after it's uploaded\n os.remove(file_name + '.csv')\n output = \"Removed CSV: \" + file_name + \"\\n\"\n os.write(log_file, output)\n\n #Close client\n c.close()\n\n # Close transport\n t.close()\n\n return 0", "def download_remote_files(output_dir, 
files):\n logging.debug(f\"Try to download files: {files}\")\n\n # Create list of remote and local files\n base_url = \"https://storage.googleapis.com/\"\n urls = [base_url+file for file in files]\n local_files = [output_dir + file.split(\"/\")[-1] for file in files]\n\n\n async def get(session, url, local_f):\n if os.path.isfile(local_f):\n logging.info(\"Raw file {} exists locally\".format(local_f))\n pass\n else:\n # Download file\n async with session.get(url=url) as response:\n if response.status == 200:\n resp = await response.read()\n with open(local_f, \"wb\") as outfile:\n outfile.write(resp)\n\n\n async def main(urls, local_files):\n conn = aiohttp.TCPConnector(limit=30)\n timeout = aiohttp.ClientTimeout(total=None, connect=None, sock_connect=30, sock_read=10)\n async with aiohttp.ClientSession(connector=conn, timeout=timeout) as session:\n _ = await asyncio.gather(*[get(session, urls[f], local_files[f]) for f in range(len(urls))])\n\n asyncio.run(main(urls, local_files))\n return local_files", "def print_ssh_commands(region_name, instance_ids, ssh_key_name=None,\n sally_ip=None, sally_key_file=None, sally_port=None):\n if not sally_key_file:\n sally_key_file = '$HOME/.ssh/%s' % ssh_key_name\n sally_port = int(sally_port) if sally_port else 22\n instance_domain = None\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n for reserv in resp['Reservations']:\n for instance in reserv['Instances']:\n instance_domain = instance['PrivateDnsName']\n LOGGER.info(\"connect to %s with: ssh -o ProxyCommand='ssh -i %s\"\\\n \" -p %d -q -W %%h:%%p %s' -i $HOME/.ssh/%s ec2-user@%s\",\n instance['InstanceId'], sally_key_file, sally_port, sally_ip,\n ssh_key_name, instance_domain)", "def _download_file(self, artifact_path, local_path):\n full_path = self.base_artifact_path / artifact_path\n with self.managed_folder.get_file(str(full_path)) as remote_file:\n with open(local_path, \"wb\") as local_file:\n for line in remote_file:\n local_file.write(line)", "def download_from_url(url, output_path):\n\n print('Pulling data from {} to {}'.format(url, output_path))\n wget.download(url, output_path)\n print('done')", "def pull(args):\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if not os.path.exists(os.path.join(args.base, path)) and remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('pull: {}'.format(path))\n ensure_local(os.path.dirname(os.path.join(args.base, path)))\n args.sftp.get(\n os.path.join(args.remote_base, path),\n os.path.join(args.base, path)\n )\n args.cache.append(path)\n args.update = True\n return", "def remote_pull(*keys):", "def download_folder(self, instance, folder, where, local):\n\n instance = self.get_instance(instance)\n\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n subprocess.check_output(\n [\"scp\", key, '-r', username + \":\" + self.default_path_aws + where + folder, local])\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n subprocess.check_output(\n [\"scp\", \"-i\", key, '-r', username + ':' + self.default_path_aws + where + folder, local])\n return \"Success to download folder \" + self.default_path_aws + where + folder + \" 
to \" + local\n except:\n return \"Faile to access the instance\"", "def _DisplayPullResult(download_folder):\n utils.PrintColorString(\n \"Download logs to folder: %s \\nYou can look into log files to check \"\n \"AVD issues.\" % download_folder)", "def get_server_logs(self):\n self.response.content\n binary_body = re.split('--==.*==', self.response.content)[2].split('\\r\\n')[5]\n\n f = StringIO.StringIO()\n f.write(bytearray(binary_body))\n\n memory_zip = ZipFile(f)\n zip_content = {name: memory_zip.read(name) for name in memory_zip.namelist()}\n oracc_log = zip_content['oracc.log']\n request_log = zip_content['request.log']\n\n # Check if server returns a lemmatised file\n autolem = None \n for key, value in zip_content.iteritems():\n if key.endswith(\"autolem.atf\"):\n autolem = value\n\n print zip_content.keys()\n print \"@\"*30\n print oracc_log\n print \"@\"*30\n print request_log\n print \"@\"*30\n if autolem:\n print autolem\n print \"@\"*30\n\n return oracc_log, request_log, autolem", "def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename", "def DisplayLog(ssh, log_file, no_prompts=False):\n warning_msg = (\"It will stream log to show on screen. If you want to stop \"\n \"streaming, please press CTRL-C to exit.\\nPress 'y' to show \"\n \"log or read log by myself[y/N]:\")\n if no_prompts or utils.GetUserAnswerYes(warning_msg):\n ssh.Run(\"tail -f -n +1 %s\" % log_file, show_output=True)", "def download(all):\n print(\"Downloading\")", "def get_remote_file(sid, path):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.get_file(path)", "def download_xes_log():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n if Configuration.enable_download:\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n if lh.check_user_enabled_download(user, process):\n content = lh.get_handler_for_process_and_session(process, session).download_xes_log()\n return jsonify({\"content\": content.decode('utf-8')})\n return jsonify({\"content\": \"\"})", "def log_download(self, download):\n with self._conn.begin():\n self._conn.execute(\n \"VALUES (log_download(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s))\",\n (\n sanitize(download.filename),\n download.host,\n download.timestamp.astimezone(UTC).replace(tzinfo=None),\n sanitize(download.arch),\n sanitize(download.distro_name),\n sanitize(download.distro_version),\n sanitize(download.os_name),\n sanitize(download.os_version),\n sanitize(download.py_name),\n sanitize(download.py_version),\n sanitize(download.installer_name),\n sanitize(download.installer_version),\n sanitize(download.setuptools_version),\n ))", "def get_remote_file(url):\n # Disable the proxies by not trusting the env\n session = requests.Session()\n session.trust_env = False\n\n # Make the request\n requests.packages.urllib3.disable_warnings()\n try:\n r = session.get(url, verify=False)\n except 
requests.exceptions.RequestException as e:\n # catastrophic error. bail.\n print(e)\n sys.exit(1)\n\n r = session.get(url, verify=False)\n remote_file = r.text\n return remote_file", "def test_retrieve_files_single(self):\n os.makedirs('/tmp/remote_pacha/localhost/another_dir')\n os.makedirs('/tmp/remote_pacha/localhost/single_dir')\n remote_file = open('/tmp/remote_pacha/localhost/single_dir/remote.txt', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/single_dir/remote.txt'))\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha',\n directory='single_dir')\n run.retrieve_files()\n result = os.path.isfile('/tmp/localhost/single_dir/remote.txt')\n line = open('/tmp/localhost/single_dir/remote.txt')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote file\")\n self.assertTrue(result)", "def test_retrieve_files_move_existing_file(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n os.mkdir('/tmp/localhost')\n\n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n result_3 = os.path.isdir('/tmp/localhost.%s' % strftime('%H%M%s'))\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_3)\n self.assertTrue(result_2)\n self.assertTrue(result_1)" ]
[ "0.73364204", "0.6596629", "0.6111099", "0.60842896", "0.6064838", "0.6026996", "0.60165", "0.59927064", "0.59570765", "0.59108406", "0.59068495", "0.58801275", "0.5834578", "0.57861924", "0.5749585", "0.573902", "0.5687385", "0.56273234", "0.5601198", "0.5537666", "0.55214125", "0.5501977", "0.5491566", "0.5452155", "0.5445011", "0.5423388", "0.5414817", "0.5411799", "0.54074377", "0.540065", "0.54003656", "0.53932476", "0.5388808", "0.5385779", "0.53744143", "0.5373443", "0.536675", "0.53657806", "0.535526", "0.53294504", "0.53072596", "0.5306569", "0.5297115", "0.5276325", "0.5268768", "0.5258367", "0.5250536", "0.5244335", "0.52403104", "0.52290666", "0.5222456", "0.52212155", "0.5210475", "0.5204109", "0.519595", "0.51815385", "0.51741505", "0.51660144", "0.5142787", "0.51387894", "0.5127118", "0.5124127", "0.51196826", "0.51193446", "0.51164395", "0.5110207", "0.5106659", "0.50993335", "0.5097825", "0.50884867", "0.5081011", "0.50727284", "0.5063975", "0.50626236", "0.5057567", "0.5046099", "0.50308967", "0.5023771", "0.50229037", "0.501925", "0.50188065", "0.5013043", "0.50001603", "0.49886423", "0.49856552", "0.49849612", "0.49848768", "0.49828148", "0.49797308", "0.4977617", "0.49680763", "0.49648988", "0.49565443", "0.49509525", "0.49496034", "0.4948074", "0.4947945", "0.49449068", "0.49436024", "0.49426216" ]
0.7495217
0
r""" Displays image either individually or in a collated grid.
def visualize(imgobjs, cols=4, collated=True, size=None): ## Separate into list of single instance image objects imgs = [] if isinstance(imgobjs, list): for io in imgobjs: imgs += images._create_img_list(io) else: imgs = images._create_img_list(imgobjs) ## Grid layout settings. Sets N, N_rows, N_cols N = len(imgs) assert N > 0 if not size: size = [0, 0] # H, W for img in imgs: _, _, H, W = get_dimensions(img) size[0] += H size[1] += W size = [int(d/len(imgs)) for d in size] else: assert len(size) == 2 N_cols = cols if cols else 4 if N < 4: N_cols = N N_rows = math.ceil(N/N_cols) print(f"Cols: {N_cols}, Rows: {N_rows}") ## Display Figure figure = plt.figure(figsize=(15, 10)) for i in range(N): dims = images.get_dimensions(imgs[i])[1:] title = f"[Image {i+1}/{N}]" if isinstance(imgs[i], str): title = f"[Image {i+1}/{N}] {files.get_filename(imgs[i])}" title += f"\n shape{dims}" img = images.to_np(imgs[i], size=size, color='rgb') subplt = figure.add_subplot(N_rows, N_cols, i+1) subplt.set_title(title, fontsize=10) subplt.axis('off') plt.imshow(img) figure.tight_layout() # plt.subplots_adjust(wspace=.25, hspace=.5) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_images_in_grid(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(f\"Invalid imgs len:{len(imgs)} col:{row} row:{col}\")\n\n for i, img in enumerate(imgs):\n plot_num = i + 1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # remove x axis\n plt.tick_params(labelleft=False) # remove y axis\n plt.imshow(img)\n plt.show()", "def grid(images, cols = 2, save = False, filename = \"\", show = False):\n \n rows = ceil(len(images) / cols)\n \n fig, ax = plt.subplots(rows, 1)\n\n index = 0\n element = []\n for row in range(rows):\n for col in range(cols): \n if index < len(images):\n element.append(images[index])\n index += 1\n \n stack = np.hstack(tuple(element))\n ax[row].axis('off')\n ax[row].imshow(stack)\n element = []\n \n plt.tight_layout()\n \n if save:\n fig.savefig(filename)\n\n if show:\n plt.show(fig)\n \n return 0", "def show_image_grid(imgs):\n grd = make_grid(imgs)\n npimg = grd.numpy()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n plt.ion()\n plt.show()", "def display_image(df, fname_col, img_dir, n):\n\t# Display some train images\n\tnrows = 1+n//20 \n\tfig, axs = plt.subplots(nrows,20, figsize=(20,1.2*nrows),\n\t\t\t\t\t\t facecolor='w', edgecolor='k')\n\taxs = axs.ravel()\n\n\tfor idx, filename in enumerate (df[fname_col][0:n].values):\n\n\t\tif not os.path.isfile(img_dir+filename):\n\t\t\tlogger.error(\"path {} does not exit\".format(img_dir+filename))\n\t\t\t\t\t\t\n\t\timg = mpimg.imread(img_dir + filename)\n\n\t\taxs[idx].imshow(img)\n\t\taxs[idx].set_axis_off()\n\t \n\tplt.subplots_adjust(wspace=0, hspace=0)\n\tplt.show()", "def display_image(self, img, img_pos):\n image = tk.Label(self.top, image=img)\n image.grid(row=img_pos[0], column=img_pos[1],\n columnspan=img_pos[2], rowspan=img_pos[3])", "def imshow_grid(images, shape=[2, 2], name='default', save=False):\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n img = images[i]\n if img.shape[0]==3:\n img = img.transpose(1, 2, 0)\n img = (img - img.min())/(img.max() - img.min())\n grid[i].imshow(img, vmin=-132, vmax = 164) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def display(self, image):\n raise NotImplementedError()", "def Show_multi_channel_image(self, allimages, enhance=False, num =None):\n import imgproc\n MAX_IMAGE_ROW = 8\n nchannel = allimages.shape[-1]\n nrow = (nchannel - 1)/MAX_IMAGE_ROW + 1\n cur_fig = plt.gcf()\n cur_fig.set_size_inches(min(16,MAX_IMAGE_ROW*1.6),min(nrow*1.6, 10))\n if enhance == 'imgeq':\n f = lambda(x):imgproc.imgeq(x)/255.0\n elif enhance == 'maptorange':\n f = lambda(x):imgproc.maptorange(x,[0,1])\n else:\n f = lambda x: x\n for channel in range(nchannel):\n plt.subplot(nrow, MAX_IMAGE_ROW, channel + 1)\n fr1 = plt.gca()\n fr1.axes.get_xaxis().set_visible(False)\n fr1.axes.get_yaxis().set_visible(False)\n 
plt.title('%d' % (channel + 1))\n if num is not None:\n plt.imshow( f(allimages[...,channel]/num[channel])) \n else:\n plt.imshow( f(allimages[...,channel]))", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def show_imgs(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(\n \"Invalid imgs len:{} col:{} row:{}\".format(len(imgs), row, col))\n\n for i, img in enumerate(imgs):\n plot_num = i+1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # x軸の削除\n plt.tick_params(labelleft=False) # y軸の削除\n plt.imshow(img)\n plt.show()", "def display(self):\n display(self.image)", "def collage(images, cols = 2, save = False, filename = \"\", show = False):\n \n rows = ceil(len(images) / cols)\n \n fig, ax = plt.subplots(rows, cols)\n\n index = 0\n\n for row in range(rows):\n for col in range(cols):\n \n if index < len(images):\n ax[row, col].imshow(images[index])\n \n ax[row, col].axis('off')\n index += 1\n plt.tight_layout()\n \n if save:\n fig.savefig(filename)\n\n if show:\n plt.show(fig)\n \n return 0", "def gridPlot6(img_stack):\r\n F = plt.figure(figsize = (20,20))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (2,3), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:6]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot16.png')\r\n if 'gplot16.png' in os.listdir():\r\n plt.savefig('gplot16_2.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def gridPlot12(img_stack):\r\n F = plt.figure(figsize = (30,30))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (3,4), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:12]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot12.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return", "def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n 
ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()", "def display_images(images, imageConversion=cv.COLOR_BGR2RGB, titles=[], columns=4, rows=None, show=True):\n if not show:\n return\n if imageConversion is not None:\n images = [cv.cvtColor(img, imageConversion) for img in images]\n\n # append filtered image\n if rows is None:\n rows = ceil(float(len(images)) / columns)\n\n try:\n for i in xrange(len(images)):\n plt.subplot(rows,columns,i+1),plt.imshow(images[i],'gray')\n if titles:\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\n plt.show()\n except:\n logging.exception(\"Could not plot / show images. Saving instead.\")\n save_plt_figure(plt, \"img_show\")", "def show_image(self, idx):\n image, target = self.__getitem__(self, idx)\n im_h, im_w, _ = image.size()\n labels_num = target['labels']\n rescale = torch.tensor([[im_w, im_h, im_w, im_h]])\n bboxs = target['boxes'] * rescale\n img = image.permute(1, 2, 0).numpy()\n for i, bboxe in enumerate(bboxs):\n x, y, xm, ym = bboxe\n label = class_name[int(labels_num[i])]\n plot_one_box((int(x), int(y), int(xm), int(ym)), img, label=label, line_thickness=3)\n cv2.imshow('image', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def show_images(imgs, nrows, ncols, figsize=None):\n figsize = (ncols, nrows)\n _, figs = plt.subplots(nrows, ncols, figsize=figsize)\n for i in range(nrows):\n for j in range(ncols):\n figs[i][j].imshow(imgs[i*ncols+j].asnumpy())\n figs[i][j].axes.get_xaxis().set_visible(False)\n figs[i][j].axes.get_yaxis().set_visible(False)\n plt.show()", "def visualize_image(images, save_name):\n dim = images.shape[0]\n n_image_rows = int(np.ceil(np.sqrt(dim)))\n n_image_cols = int(np.ceil(dim * 1.0 / n_image_rows))\n gs = gridspec.GridSpec(n_image_rows, n_image_cols, top=1., bottom=0.,\n right=1., left=0., hspace=0., wspace=0.)\n\n for g, count in zip(gs, range(int(dim))):\n ax = plt.subplot(g)\n ax.imshow(images[count, :].astype(np.float32).reshape((28, 28)))\n ax.set_xticks([])\n ax.set_yticks([])\n plt.savefig(save_name + '_vis.png')", "def show_images(images, cols = 1, titles = None):\n import matplotlib.pyplot as plt\n import numpy as np\n \n assert((titles is None) or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Frame %d' % i for i in range(n_images)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(np.ceil(n_images/float(cols)), cols, n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title, size = 50)\n a.axis('off')\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "def show_images(images, level,cols = 1,titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.savefig(\"SteerablePyramid/level\"+ str(level) +\".png\")\n plt.clf()", "def show_images(images, cols = 1, titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, 
(image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "def show_images(images, cols = 1, titles = None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "def gridPlot48(img_stack):\r\n F = plt.figure(figsize = (50,50))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (6,8), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:48]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot48.png')\r\n# for cax in grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return", "def matplotlibDisplayMulti(imgs, titles=None, colorFlag='gray'):\n if titles is None:\n titles = []\n for i in range(len(imgs)):\n titles.append(\"IMAGE \" + str(i))\n for i in range(len(imgs)):\n plt.subplot(1, len(imgs), 1+i)\n plt.imshow(imgs[i], colorFlag)\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()", "def display_image ( X ):\r\n\t# on teste que le tableau contient bien 256 valeurs\r\n\tif X.size != 256:\r\n\t\traise ValueError ( \"Les images doivent etre de 16x16 pixels\" )\r\n\r\n\t# on cree une image pour imshow: chaque pixel est un tableau a 3 valeurs\r\n\t# (1 pour chaque canal R,G,B). 
Ces valeurs sont entre 0 et 1\r\n\tY = X / X.max ()\r\n\timg = np.zeros ( ( Y.size, 3 ) )\r\n\tfor i in range ( 3 ):\r\n\t\timg[:,i] = X\r\n\r\n\t# on indique que toutes les images sont de 16x16 pixels\r\n\timg.shape = (16,16,3)\r\n\r\n\t# affichage de l'image\r\n\tplt.imshow( img )\r\n\tplt.show ()", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def show_images(images, cols=1, titles=None):\n assert ((titles is None) or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1, n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images / float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "def show_images(images, cols=1, titles=None):\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "def show_images(images, cols = 1, titles = None):\n params = {'axes.titlesize': 8,\n 'axes.labelsize': 8,\n 'font.size': 8,\n 'legend.fontsize': 8,\n 'xtick.labelsize': 8,\n 'ytick.labelsize': 8,\n 'font.family': 'DejaVu Serif',\n 'font.serif': 'Computer Modern',\n }\n plt.rcParams.update(params)\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n plt.title(\"Point Shift Sweeps from -30 to 30\")\n \n for n, (image, title) in enumerate(zip(images, titles)):\n \n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n a.get_yaxis().set_visible(False)\n a.get_xaxis().set_visible(False)\n\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image, origin='lower') \n fig.set_size_inches(np.array(fig.get_size_inches()))\n \n\n plt.show()", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def gridPlot16(img_stack):\r\n F = plt.figure(figsize = (30,30))\r\n F.subplots_adjust(left = 0.05, right = 0.95)\r\n grid = ImageGrid(F, 142, nrows_ncols = (4,4), axes_pad = 0.0, share_all = True,\r\n label_mode = \"L\", cbar_location = \"top\", cbar_mode = \"single\")\r\n \r\n i = 0\r\n for img in img_stack[0:16]:\r\n im = grid[i].imshow(img, interpolation = \"nearest\", vmin = 0, vmax = 255)\r\n i += 1 \r\n grid.cbar_axes[0].colorbar(im)\r\n plt.savefig('gplot16.png')\r\n if 'gplot16.png' in os.listdir():\r\n plt.savefig('gplot16_2.png')\r\n# for cax in 
grid.cbar_axes:\r\n# cax.toggle_label(False)\r\n return", "def show_multiple_image_with(row=1, col=1, images=[], titles=[]):\n fig, ax = plt.subplots(row, col, figsize=(10, 10), sharex=True, sharey=True)\n image_counter = 0\n for i in range(row):\n for j in range(col):\n ax[i, j].imshow(images[image_counter])\n ax[i, j].set_title(titles[image_counter])\n\n for a in ax.ravel():\n a.set_axis_off()\n\n plt.tight_layout()\n plt.show()", "def displayImages(self):\n\n plt.figure(figsize=(8,6))\n plt.subplot(1,2,1)\n plt.imshow( self.original_image, cmap=\"gray\")\n plt.title(\"Original Image\")\n plt.subplot(1,2,2)\n plt.imshow( self.blurred_image, cmap=\"gray\")\n plt.title(\"Blurred Image\")", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def plot_images_grid(x: torch.tensor, export_img, title: str = '', nrow=6, padding=2, normalize=False, pad_value=0, save = True, apply_transforms = False):\n logger = logging.getLogger()\n global global_counter\n #if apply_transforms:\n # #global_counter = 0\n \n # transform = transforms.Compose([\n # #transforms.ToPILImage(),transforms.Lambda(lambda x: transforms.functional.adjust_brightness(x, brightness_factor = 1.1)),\n # #transforms.Lambda(lambda x: normalize_to_zero_one_range(x)),\n # transforms.Lambda(lambda x: tensor_to_img(x)),\n # transforms.Lambda(lambda x: save_img_patch(x, prefix = 'original')),\n # transforms.Lambda(lambda x: generate_NCUT_segmented_image(x)),\n # transforms.Lambda(lambda x: save_img_patch(x, prefix = 'NCUT')),\n # #transforms.Lambda(lambda x: draw_feature_contours(x)),\n # transforms.ToTensor() \n # ])\n \n # for i in range(x.shape[0]): \n # try:\n # x[i] = transform(x[i])\n # except:\n # logger.error(\"Exception occurred while appliying transform {}\".format( sys.exc_info()[0]))\n # logger.error(\"Was processing image number {} with global counter {}\".format(str(i), global_counter))\n\n grid = make_grid(x, nrow=nrow, padding=padding, normalize=normalize, pad_value=pad_value)\n npgrid = grid.detach().cpu().numpy()\n\n plt.imshow(np.transpose(npgrid, (1, 2, 0)), interpolation='nearest')\n\n ax = plt.gca()\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n if not (title == ''):\n plt.title(title)\n if (save == True):\n plt.savefig(export_img, bbox_inches='tight', pad_inches=0.1)\n else:\n plt.show()\n plt.clf()", "def show_imagegrid_dataset(dataset,\n num=10,\n shuffle=True,\n classes='auto',\n figsize=None,\n fontsize=20,\n image_attr={'cmap': plt.cm.Greys_r}):\n sample = dataset[0]\n if 
isinstance(sample, tuple) and len(sample) == 2:\n images_per_class = get_labeled_imagegrid(dataset,\n num=num,\n shuffle=shuffle,\n classes=classes)\n num = min(num, max(map(len, images_per_class.values())))\n classes = list(images_per_class.keys())\n\n if figsize is None:\n figsize = (2 * num, 2 * len(classes))\n fig, axs = plt.subplots(figsize=figsize, nrows=len(classes), ncols=num)\n if len(classes) == 1:\n axs = np.expand_dims(axs, 0)\n if num == 1:\n axs = np.expand_dims(axs, -1)\n for i, (class_name, class_images) in enumerate(images_per_class.items()):\n for j, img in enumerate(class_images):\n show_image(img, axs[i][j], image_attr)\n axs[i][0].set_ylabel(str(class_name), fontsize=fontsize)\n elif isinstance(sample, (Image, torch.Tensor, np.ndarray)):\n image_list = get_imagegrid(dataset,\n num=num,\n shuffle=shuffle)\n num = min(len(image_list), num)\n nrows = math.ceil(math.sqrt(num))\n ncols = math.ceil(num / nrows)\n if figsize is None:\n figsize = (2 * nrows, 2 * ncols)\n fig, axs = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols)\n axs = axs.flatten()\n for i, img in enumerate(image_list):\n show_image(img, axs[i], image_attr)", "def plot_gallery(images , h, w, n_row=3, n_col=6):\n plt.figure(figsize=(1.7 * n_col, 2.3 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(len(images)):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n\n plt.xticks(())\n plt.yticks(())", "def show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n axes[i][j].imshow(imgs[i * num_cols + j].asnumpy())\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n return axes", "def display(self):\n nrow = 2\n ncol = len(self.views) + 1\n rows = [(self.views[0].original, len(self.views)),\n (self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n originals = [(v.position.id, v.original) for v in self.views] + [\n ('combined', np.median(np.stack([v.original for v in self.views]), axis=0))]\n warped = [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]\n for ax, (title, img) in zip(axes.ravel(), originals + warped):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def plot_many_images(images, titles, rows=1, columns=2):\n for i, image in enumerate(images):\n plt.subplot(rows, columns, i + 1)\n plt.imshow(image, \"gray\")\n plt.title(titles[i])\n plt.xticks([]), plt.yticks([]) # Hide tick marks\n plt.show()", "def show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs):\n rows = (items+cols-1) // cols\n if figsize is None: figsize = (cols*3, rows*3)\n fig,axs = plt.subplots(rows, cols, figsize=figsize)\n # for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs)\n for *o,ax in zip(*to_cpu(b), axs.flatten()):\n show(o, ax=ax, **kwargs)", "def display_classes(png, images, classes, ncol=4):\n fig = plt.figure()\n nrow = len(images) / ncol\n if len(images) % ncol > 0: nrow = nrow + 1\n\n def draw(i):\n plt.subplot(nrow,ncol,i)\n 
plt.imshow(images[i].reshape(28,28), cmap='gray', interpolation='none')\n plt.title('Predicted: %s' % classes[i])\n [ draw(i) for i in range(0,len(images)) ]\n plt.tight_layout()\n plt.savefig(png)", "def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()", "def visualize(viewer, img, colormaps=None):\n \n if colormaps == 'rgb':\n colormaps = [\n 'red',\n 'green',\n 'blue',\n ]\n # add successively all channels\n for i in range(img.shape[-1]):\n # avoid the alpha channel of RGB images\n if i == 3 and np.all(img[:, :, i] == 1):\n pass\n else:\n if colormaps is not None and i < len(colormaps):\n colormap = colormaps[i]\n else:\n colormap = 'gray'\n viewer.add_image(img[:, :, i], name='ch' + str(i), colormap=colormap, blending='additive')\n return", "def plot_images_grid(images, labels, title):\n images = images.cpu()\n labels = labels.cpu()\n \n assert type(images[0]) is torch.Tensor, 'Image to plot is not torch.Tensor'\n image_size = int(np.sqrt(images[0].shape[0]))\n \n fig = plt.figure(figsize=(10,4))\n for idx in range(10):\n ax = fig.add_subplot(2,10/2,idx+1, xticks=[], yticks=[])\n ax.imshow(images[idx].view(image_size, image_size), cmap = 'gray')\n label = labels[idx].item()\n ax.set_title(label)\n #end\n fig.suptitle(title, fontsize = 14)\n plt.show()\n plt.close('all')", "def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): #@save\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(d2l.numpy(img))\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes", "def grid(self, **kw):\n self.__imframe.grid(**kw) # place CanvasImage widget on the grid\n self.__imframe.grid(sticky='nswe') # make frame container sticky\n self.__imframe.rowconfigure(0, weight=1) # make canvas expandable\n self.__imframe.columnconfigure(0, weight=1)", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols*scale, num_rows*scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n # show the target image\n axes[i][j].imshow(imgs[i*num_cols+j])\n # set the sub-axis to be invisible\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n # remember to show figure at last\n plt.show()\n return axes", "def show(self):\r\n display(self.grid_part)", "def display_images_in_grids(vizs, nc, titles=None, export_fn=None, maintain_shape=True, pad_color='white',\n title_fontsize=10, **kwargs):\n\n if maintain_shape:\n if pad_color == 'white':\n 
pad_value = 255\n elif pad_color == 'black':\n pad_value = 0\n vizs = pad_patches_to_same_size(vizs, pad_value=pad_value)\n\n n = len(vizs)\n nr = int(np.ceil(n/float(nc)))\n aspect_ratio = vizs[0].shape[1]/float(vizs[0].shape[0]) # width / height\n\n fig, axes = plt.subplots(nr, nc, figsize=(nc*5*aspect_ratio, nr*5))\n axes = axes.flatten()\n\n for i in range(len(axes)):\n if i >= n:\n axes[i].axis('off');\n else:\n if vizs[i].dtype == np.float16:\n vizs[i] = vizs[i].astype(np.float32)\n axes[i].imshow(vizs[i], **kwargs);\n if titles is not None:\n axes[i].set_title(titles[i], fontsize=title_fontsize);\n axes[i].set_xticks([]);\n axes[i].set_yticks([]);\n\n fig.tight_layout();\n\n if export_fn is not None:\n create_if_not_exists(os.path.dirname(export_fn))\n plt.savefig(export_fn);\n plt.close(fig)\n else:\n plt.show();", "def display(self, grid):\n for i in range(grid.height):\n print(\"-\" + \"-------\"*grid.width)\n for j in range(grid.width):\n if not j:\n print(\"|\", end=\"\") # begin row with vertical line\n a = self.actions.get((i, j), ' ')\n print(\" %s |\" % a, end=\"\")\n print(\"\") # new line\n print(\"-\" + \"-------\"*grid.width, end='\\n\\n')", "def plot_images(imgs, layout, img_sz = 0.7, suptitle = ''):\n\tnrows, ncols = layout \n\tfig, axes = plt.subplots(nrows, ncols, \n\t\tfigsize = (img_sz * ncols, img_sz * nrows))\n\taxes = axes.ravel()\n\tfig.subplots_adjust(hspace = 0, wspace = 0)\n\tfig.suptitle(suptitle)\n\tfor i, img in enumerate(imgs):\n\t\taxes[i].get_xaxis().set_visible(False)\n\t\taxes[i].get_yaxis().set_visible(False)\n\t\taxes[i].imshow(img)", "def show_images(plate_full_name, well):\n if not IPYTHON:\n return\n\n src_dir = op.join(cp_config[\"Paths\"][\"SrcPath\"], plate_full_name)\n ctrl_images = load_control_images(src_dir)\n image_dir = op.join(src_dir, \"images\")\n templ_dict = {}\n for ch in range(1, 6):\n im = load_image(image_dir, well, ch)\n templ_dict[\"Img_{}_Cpd\".format(ch)] = img_tag(\n im, options='style=\"width: 250px;\"')\n templ_dict[\"Img_{}_Ctrl\".format(ch)] = ctrl_images[ch]\n tbody_templ = Template(cprt.IMAGES_TABLE)\n table = cprt.TABLE_INTRO + \\\n tbody_templ.substitute(templ_dict) + cprt.HTML_EXTRO\n return HTML(table)", "def show(self, screen):\n x_display = self.xy_position[0] * constants.CELL_SIZE\n y_display = self.xy_position[1] * constants.CELL_SIZE\n screen.blit(self.image, (x_display, y_display))", "def show_images_pyplot(images, titles, cols=1):\n assert len(images) == len(titles), 'Every image should have unique title!'\n n_images = len(images)\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images / float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.axis('off')\n plt.imshow(image)\n a.set_title(title)\n manager = plt.get_current_fig_manager()\n manager.resize(*manager.window.maxsize())\n plt.show()", "def show_torch_imgs(imgs, nrow=8, figsize=(8, 5), axis_off=True , **opt):\n import torchvision\n import torch\n if not torch.is_tensor(imgs):\n # Not a torch tensor. 
Assume that it is torch.autograd.Variable\n # Try to get the tensor inside the Variable.\n try:\n imgs = imgs.data\n except:\n raise ValueError('Expect input imgs to be a torch Tensor or torch.autograd.Variable.')\n # https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91\n img = torchvision.utils.make_grid(imgs, nrow=nrow, **opt)\n npimg = img.cpu().numpy()\n # make it height x width x channels\n npimg = np.transpose(npimg, (1, 2, 0))\n\n plt.figure(figsize=figsize)\n plt.imshow(npimg, interpolation='nearest')\n if axis_off:\n plt.axis('off')", "def plot_gallery(self,images, titles, h, w, n_row=5, n_col=5):\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n plt.show()", "def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/WGAN.png')\n plt.show()", "def show_images(images, save=None, size=None):\n assert len(images) > 0, \"images should contain at least 1 element\"\n assert len(images[0].shape) == 3, \"each image should contain 3 elements (c, w,h)\"\n \n fig, ax = plt.subplots(nrows=images[0].shape[0], ncols=len(images))\n \n for i in range(len(images)): \n for j in range(images[0].shape[0]):\n ax[i,j].imshow(images[i][j,:,:], cmap='gray')\n \n plt.show()", "def displayImg(self):\r\n\r\n\t# If you want to skip n frames, set value to 0 to see all images\r\n\tSKIP = 4500\r\n for idx in range(len(self.centers)):\r\n\t if idx < SKIP:\r\n\t\tcontinue\r\n file_left = self.lefts[idx][5]\r\n file_center = self.centers[idx][5]\r\n file_right = self.rights[idx][5]\r\n\r\n img_left = cv2.imread(os.path.join(self.pathDir, file_left), \\\r\n cv2.IMREAD_COLOR)\r\n img_center = cv2.imread(os.path.join(self.pathDir, file_center), \\\r\n cv2.IMREAD_COLOR)\r\n img_right = cv2.imread(os.path.join(self.pathDir, file_right), \\\r\n cv2.IMREAD_COLOR)\r\n\r\n\t #Resize the image to 50%\r\n img_l = cv2.resize(img_left, None, fx=0.5, fy=0.5, \\\r\n interpolation = cv2.INTER_LINEAR)\r\n img_c = cv2.resize(img_center, None, fx=0.5, fy=0.5, \\\r\n interpolation = cv2.INTER_LINEAR)\r\n img_r = cv2.resize(img_right, None, fx=0.5, fy=0.5, \\\r\n interpolation = cv2.INTER_LINEAR)\r\n \r\n height, width = img_c.shape[:2]\r\n new_img = np.zeros((height, width*3, img_c.shape[2]),\r\n np.uint8)\r\n\r\n #Adding sequence numbers and Time\r\n\t #Left\r\n strTime = self.timestampToStr(self.lefts[idx][1])\r\n\t self.putTextToImg(img_l, self.lefts[idx][0], strTime, height)\r\n\t #Center\r\n\t strTime = self.timestampToStr(self.centers[idx][1])\r\n\t self.putTextToImg(img_c, self.centers[idx][0], strTime, height)\r\n\t #Right\r\n\t strTime = self.timestampToStr(self.rights[idx][1])\r\n\t self.putTextToImg(img_r, self.rights[idx][0], strTime, height)\r\n\t \r\n\t angle = float(self.angles_at_timestamps[idx])\r\n\t speed = float(self.speed_at_timestamps[idx])\r\n\r\n\t print \"speed: %f - angle: %f\" % (speed, 
angle)\r\n\r\n\t self.draw_path_on(img_c, speed, angle)\r\n\r\n\t #Generate the new image\r\n for i in range(height):\r\n new_img[i] = np.concatenate((img_l[i, : ], img_c[i, : ], \\\r\n img_r[i, : ]))\r\n \r\n\r\n cv2.imshow('Udacity Challenge 2 - Viewer', new_img)\r\n key = cv2.waitKey(30)\r\n\r\n # Press q to exit\r\n if key == ord('q'):\r\n break\r\n\r\n cv2.destroyAllWindows()", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n # plt.savefig('./drive/My Drive/Colab Notebooks/TACK/Large/result' + ' '.join(name.split('_')).title() + '.png')", "def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n pl.subplot(n_row, n_col, i + 1)\n pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)\n pl.title(titles[i], size=12)\n pl.xticks(())\n pl.yticks(())", "def display(self):\n nrow = 1\n ncol = len(self.views) + 1\n rows = [(self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n for ax, (title, img) in zip(axes.ravel(),\n [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def visualize_MTL(**images):\r\n n = len(images)\r\n plt.figure(figsize=(16, 5))\r\n for i, (name, image) in enumerate(images.items()):\r\n if image==None:\r\n continue\r\n else:\r\n plt.subplot(1, n, i + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.title(' '.join(name.split('_')).title())\r\n plt.imshow(image)\r\n plt.show()", "def images_formatter(imgs, col=2, width=96):\n # https://www.kaggle.com/stassl/displaying-inline-images-in-pandas-dataframe\n if len(imgs) < col:\n html = u'<div class=\"df\">'\n else:\n html = u'<div class=\"df\" style=\"width: {}px\">'.format(col * width + 15)\n\n for i in range(len(imgs)):\n # html += '<div class=\"imgs\" style=\"display: inline-block; width: {}px\">'.format(width+30)\n html += '<div class=\"imgs\" style=\"display: inline-block; max-width: {}px\">'.format(width + 10)\n img64 = image_base64(imgs[i], resize=width)\n # print(img64)\n img_tag = '<img src=\"data:image/jpeg;base64,{}\">'.format(img64)\n # img_tag = f'<img src=\"data:image/jpeg;base64,{img64}\">'\n html += img_tag + \" </div> \"\n\n if (i + 1) % col == 0:\n html += \"<br>\"\n # print(html)\n return html + \"</div>\"", "def plot_gallery2(images, titles, h, w, n_row=3, n_col=4):\n pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n pl.subplot(n_row, n_col, i + 1)\n im = images[i].reshape((h, w, 3))\n # Normalize image to 0..1 range for visualization\n if im.dtype.name == 'float64':\n m0 = np.min(im)\n m1 = np.max(im)\n im = (im - m0) / (m1 - m0)\n pl.imshow(im)\n pl.title(titles[i], size=12)\n pl.xticks(())\n pl.yticks(())", "def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(8, 6))\n 
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/DCGAN.png')\n plt.show()", "def render_image(grid,window):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n#top row:\r\n for j in range(Y):\r\n for sub_j in range(3): #3 rows \r\n ROW = []\r\n for i in range(X):\r\n ROW += grid[j][i].arr[sub_j]\r\n \r\n for k in range(len(ROW)):\r\n COLOR = (ROW[k],ROW[k],ROW[k])\r\n Y_pos = (3*j + sub_j)*pixel_size*scale\r\n X_pos = k*(pixel_size)*scale\r\n width = height = pixel_size*scale\r\n pygame.draw.rect(window,COLOR,(X_pos,Y_pos,width,height))\r\n \r\n# print(ROW)\r\n return", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def display_similar_for_img(img_dir, img_name,img_map, indices, disp_num):\n images = get_similar_imgs(img_name, img_map, indices, disp_num, img_dir)\n for img in images:\n display(Image(img))", "def test_display_methods_with_display_mode_tiled(img_3d_mni):\n display = plot_img(img_3d_mni, display_mode=\"tiled\")\n display.add_overlay(img_3d_mni, threshold=0)\n display.add_edges(img_3d_mni, color=\"c\")\n display.add_contours(\n img_3d_mni, contours=2, linewidth=4, colors=[\"limegreen\", \"yellow\"]\n )", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def plot_grid(im_list, grid_shape, scale=0.1, axes_pad=0.07):\r\n # https://gist.github.com/lebedov/7018889ba47668c64bcf96aee82caec0\r\n\r\n # Grid must be 2D:\r\n assert len(grid_shape) == 2\r\n\r\n # Make sure all images can fit in grid:\r\n assert np.prod(grid_shape) >= len(im_list)\r\n\r\n grid = ImageGrid(plt.gcf(), 111, grid_shape, axes_pad=axes_pad)\r\n for i, data in enumerate(im_list):\r\n\r\n # Scale image:\r\n im = PIL.Image.fromarray(data)\r\n thumb_shape = [int(scale*j) for j in im.size]\r\n im.thumbnail(thumb_shape, PIL.Image.ANTIALIAS)\r\n data_thumb = np.array(im)\r\n grid[i].plot_nnua(data_thumb)\r\n\r\n # Turn off axes:\r\n grid[i].axes.get_xaxis().set_visible(False)\r\n grid[i].axes.get_yaxis().set_visible(False)", "def show_imgs(dataset, n_imgs, plot_size=(15, 15), cmap=None):\n n_cols = int(np.sqrt(n_imgs))\n n_rows = int(np.ceil(np.sqrt(n_imgs)))\n class_idx = dataset.class_to_idx\n idx_class = idx_to_class(class_idx)\n\n fig, axes = plt.subplots(n_rows, n_cols, figsize=plot_size)\n for i, ax in enumerate(axes.flatten()):\n ax.axis('off')\n title = f'Class : {idx_class[dataset.targets[i]]}'\n ax.imshow(dataset.data[i], cmap=cmap)\n ax.set_title(title)\n fig.tight_layout()", "def show_four_images(img1, img2, img3, img4, title):\n shape = (460, 250)\n # Get all images in same size for better display\n img1 = cv2.resize(img1, shape)\n img2 = cv2.resize(img2, shape)\n img3 = cv2.resize(img3, shape)\n img4 = cv2.resize(img4, shape)\n # combined 2 images horizontally\n numpy_horizontal1 = np.hstack((img1, img2))\n # combined the rest 2 images horizontally\n numpy_horizontal2 = np.hstack((img3, img4))\n # now combined all vertically to 1 image and display\n numpy_vertical = np.vstack((numpy_horizontal1, 
numpy_horizontal2))\n # final thing - show the output:\n show_image(numpy_vertical, title)", "def build_deck_screen_grid_display(grid, screen):\n screen.blit(grid.build_deck_screen_card_gallery_grid, grid.build_deck_screen_card_gallery_grid_rect)\n screen.blit(grid.build_deck_screen_deck_grid, grid.build_deck_screen_deck_grid_rect)", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()", "def display_grid(grid):\n\n\tprint(\"\"\"\n 0 1 2 3 4 5 6 7\n\t \n ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ \"\"\", colors.BOLD + \"(X)\" + colors.STOP, end = '')\n\n\tprint('\\n\\n')\n\n\trow = 0\n\n\tfor i in range(8):\n\t\tprint(' ', row, ' ▶ ', end = ' ')\n\t\tfor j in range(8):\n\t\t\tprint(grid[j,i], end = ' ')\n\t\tprint('\\n\\n')\n\t\trow += 1\n\n\tprint(colors.BOLD + ' (Y)\\n' + colors.STOP)", "def visualize(**images):\r\n n_images = len(images)\r\n plt.figure(figsize=(20, 8))\r\n for idx, (name, image) in enumerate(images.items()):\r\n plt.subplot(1, n_images, idx + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n # get title from the parameter names\r\n plt.title(name.replace('_', ' ').title(), fontsize=20)\r\n plt.imshow(image)\r\n plt.show()", "def draw_images(images, # type: thelper.typedefs.OneOrManyArrayType\n captions=None, # type: Optional[List[str]]\n redraw=None, # type: Optional[thelper.typedefs.DrawingType]\n show=True, # type: Optional[bool]\n block=False, # type: Optional[bool]\n use_cv2=True, # type: Optional[bool]\n cv2_flip_bgr=True, # type: Optional[bool]\n img_shape=None, # type: Optional[thelper.typedefs.ArrayShapeType]\n max_img_size=None, # type: Optional[thelper.typedefs.ArrayShapeType]\n grid_size_x=None, # type: Optional[int]\n grid_size_y=None, # type: Optional[int]\n caption_opts=None,\n window_name=None, # type: Optional[str]\n ): # type: (...) -> thelper.typedefs.DrawingType\n nb_imgs = len(images) if isinstance(images, list) else images.shape[0]\n if nb_imgs < 1:\n return None\n assert captions is None or len(captions) == nb_imgs, \"captions count mismatch with image count\"\n # for display on typical monitors... 
(height, width)\n max_img_size = (800, 1600) if max_img_size is None else max_img_size\n grid_size_x = int(math.ceil(math.sqrt(nb_imgs))) if grid_size_x is None else grid_size_x\n grid_size_y = int(math.ceil(nb_imgs / grid_size_x)) if grid_size_y is None else grid_size_y\n assert grid_size_x * grid_size_y >= nb_imgs, f\"bad gridding for subplots (need at least {nb_imgs} tiles)\"\n if use_cv2:\n if caption_opts is None:\n caption_opts = {\n \"org\": (10, 40),\n \"fontFace\": cv.FONT_HERSHEY_SIMPLEX,\n \"fontScale\": 0.40,\n \"color\": (255, 255, 255),\n \"thickness\": 1,\n \"lineType\": cv.LINE_AA\n }\n if window_name is None:\n window_name = \"images\"\n img_grid_shape = None\n img_grid = None if redraw is None else redraw[1]\n for img_idx in range(nb_imgs):\n image = images[img_idx] if isinstance(images, list) else images[img_idx, ...]\n if img_shape is None:\n img_shape = image.shape\n if img_grid_shape is None:\n img_grid_shape = (img_shape[0] * grid_size_y, img_shape[1] * grid_size_x, img_shape[2])\n if img_grid is None or img_grid.shape != img_grid_shape:\n img_grid = np.zeros(img_grid_shape, dtype=np.uint8)\n if image.shape[2] != img_shape[2]:\n raise AssertionError(f\"unexpected image depth ({image.shape[2]} vs {img_shape[2]})\")\n if image.shape != img_shape:\n image = cv.resize(image, (img_shape[1], img_shape[0]), interpolation=cv.INTER_NEAREST)\n if captions is not None and str(captions[img_idx]):\n image = cv.putText(image.copy(), str(captions[img_idx]), **caption_opts)\n offsets = (img_idx // grid_size_x) * img_shape[0], (img_idx % grid_size_x) * img_shape[1]\n np.copyto(img_grid[offsets[0]:(offsets[0] + img_shape[0]), offsets[1]:(offsets[1] + img_shape[1]), :], image)\n win_name = str(window_name) if redraw is None else redraw[0]\n if img_grid is not None:\n display = img_grid[..., ::-1] if cv2_flip_bgr else img_grid\n if display.shape[0] > max_img_size[0] or display.shape[1] > max_img_size[1]:\n if display.shape[0] / max_img_size[0] > display.shape[1] / max_img_size[1]:\n dsize = (max_img_size[0], int(round(display.shape[1] / (display.shape[0] / max_img_size[0]))))\n else:\n dsize = (int(round(display.shape[0] / (display.shape[1] / max_img_size[1]))), max_img_size[1])\n display = cv.resize(display, (dsize[1], dsize[0]))\n if show:\n cv.imshow(win_name, display)\n cv.waitKey(0 if block else 1)\n return win_name, img_grid\n else:\n fig, axes = redraw if redraw is not None else plt.subplots(grid_size_y, grid_size_x)\n if nb_imgs == 1:\n axes = np.array(axes)\n for ax_idx, ax in enumerate(axes.reshape(-1)):\n if ax_idx < nb_imgs:\n image = images[ax_idx] if isinstance(images, list) else images[ax_idx, ...]\n if image.shape != img_shape:\n image = cv.resize(image, (img_shape[1], img_shape[0]), interpolation=cv.INTER_NEAREST)\n ax.imshow(image, interpolation='nearest')\n if captions is not None and str(captions[ax_idx]):\n ax.set_xlabel(str(captions[ax_idx]))\n ax.set_xticks([])\n ax.set_yticks([])\n fig.set_tight_layout(True)\n if show:\n fig.show()\n if block:\n plt.show(block=block)\n return None\n plt.pause(0.5)\n return fig, axes", "def plot_image_grid(ax, images, n=20, m=None, img_rows=28, img_cols=28):\n if m is None:\n m = n\n \n grid = images[:n*m].reshape(n, m, img_rows, img_cols)\n\n return ax.imshow(np.vstack(np.dstack(grid)), cmap='gray')", "def plot_gallery(images, titles, h, w, n_row=4, n_col=4):\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(len(images)):\n 
plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())", "def plot_image_grid(epoch, generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/train_images/image_iteration_{:05d}.png'.format(epoch + 1))\n plt.close()", "def plot_gallery(images, titles, n_row=3, n_col=4):\n plt.figure(figsize=(2.5 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35, wspace = .35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n #plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n # image = images[i].reshape(h, w)\n # img = []\n # for j in range(len(image)):\n # img.append(list(image[j]))\n # #print(img[:5])\n plt.imshow(images[i])\n #plt.imshow(images[i])\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())", "def view_images(dataset, size):\n images, labels = dataset\n assert images.shape[0] == labels.shape[0]\n\n num_images = images.shape[0]\n num_cols = 3\n num_rows = np.ceil(num_images / num_cols).astype(\"int\")\n plt.figure(figsize=size)\n for i in range(num_images):\n image = images[i]\n label = labels[i]\n ax = plt.subplot(num_rows, num_cols, i + 1)\n plt.imshow(np.array(image, dtype=\"float\"))\n plt.title(\"Number: \" + str(label))\n plt.axis(\"off\")", "def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)", "def plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n for i in range(n_row * n_col):\n plt.subplot(n_row, n_col, i + 1)\n plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n plt.show()\n plt.savefig(\"feature_{}.png\".format(50))", "def __translate(self, img):\n if not isinstance(img, Image):\n raise InvalidImageTypeException(\"display_images only accepts objects of type Image\")\n\n w = img.width()\n h = img.height()\n tkimg = Tkinter.PhotoImage(width=w, height=h)\n for x in range(w):\n for y in range(h):\n tkimg.put('#%02x%02x%02x' % img.get_rgb(x, y), (x, y))\n return tkimg", "def visualization(data, rows, cols, titles, figsize):\n fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=figsize)\n # plot image on each subplot\n for i, axi in enumerate(ax.flat):\n # i is in range [0, nrows * ncols)\n # axi is equivalent to ax[rowid][colid]\n axi.imshow(data[i])\n axi.set_title(titles[i])\n plt.tight_layout(True)\n plt.show()" ]
[ "0.75970733", "0.70164585", "0.69525635", "0.6862031", "0.6858415", "0.6740404", "0.67095673", "0.67095673", "0.6658414", "0.6656997", "0.6640771", "0.6611372", "0.6593967", "0.6576421", "0.65030444", "0.64479935", "0.6447659", "0.6405609", "0.63985646", "0.63723314", "0.63717556", "0.63700026", "0.6365389", "0.6332115", "0.63106704", "0.629433", "0.629433", "0.62894386", "0.6281625", "0.62770784", "0.6268044", "0.62670314", "0.62667876", "0.6262749", "0.62577426", "0.6251552", "0.6243719", "0.6233195", "0.62327635", "0.62327635", "0.62289727", "0.62196094", "0.620803", "0.61760247", "0.617187", "0.61517745", "0.6151703", "0.6145985", "0.6135726", "0.61291945", "0.6128007", "0.6127429", "0.61203253", "0.6120144", "0.6120144", "0.6120144", "0.61111516", "0.6107616", "0.6106858", "0.6102635", "0.61007226", "0.609111", "0.60891366", "0.6071942", "0.60692555", "0.6053033", "0.60501885", "0.60493666", "0.6045125", "0.60403883", "0.60349727", "0.60278404", "0.60214084", "0.6019916", "0.6007871", "0.60074484", "0.60067093", "0.59986806", "0.5994845", "0.59888726", "0.5967013", "0.5965082", "0.59593755", "0.5955322", "0.5954181", "0.5946782", "0.5937751", "0.5936804", "0.59265095", "0.59247637", "0.5919362", "0.59170794", "0.59070766", "0.5899942", "0.58996105", "0.58847046", "0.5881955", "0.5875327", "0.58657336", "0.58602756" ]
0.68058664
5
This function calculates a correlation matrix from a covariance matrix
def correlation(C):
    """Convert a covariance matrix C into the corresponding correlation matrix."""
    if type(C) is not np.ndarray:
        raise TypeError('C must be a numpy.ndarray')
    if len(C.shape) != 2 or C.shape[0] != C.shape[1]:
        raise ValueError('C must be a 2D square matrix')
    # Divide each entry by the standard deviations of its row and column variables
    return C / np.sqrt(np.outer(np.diagonal(C), np.diagonal(C)))
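For reference, a minimal usage sketch of the snippet above, assuming NumPy is installed and the `correlation` function defined in the document is in scope:

import numpy as np

# A 2x2 covariance matrix: variances 4 and 9, covariance 3.
cov = np.array([[4.0, 3.0],
                [3.0, 9.0]])

# Each entry C[i, j] is divided by sqrt(C[i, i] * C[j, j]), so the
# off-diagonal becomes 3 / (2 * 3) = 0.5 and the diagonal becomes 1.
print(correlation(cov))
# Expected output:
# [[1.  0.5]
#  [0.5 1. ]]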
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_correlation(data):\n pass", "def correlation(self, other):\n dates=self.get_dates(other.get_dates())\n #print(len(self.get_values(dates)))\n #print(len(other.get_values(dates)))\n #print(self.get_values(dates))\n r,p=stats.pearsonr(self.get_values(dates), other.get_values(dates))\n return r", "def Corr(x,y):\n \n cocoeff1 = np.empty((y.shape[1],y.shape[2]))\n cocoeff2 = np.empty((y.shape[1],y.shape[2]))\n for i in xrange(y.shape[1]):\n for j in xrange(y.shape[2]):\n cocoeff1[i,j],cocoeff2[i,j] = sts.pearsonr(x[:,i,j],y[:,i,j])\n \n print 'Completed: Correlation calculations!'\n \n return cocoeff1, cocoeff2", "def correlation(result, reference):\n \n r = np.corrcoef(result, reference)[0,1]\n \n return r", "def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue", "def calculate_r_corr(self):\n\n r_corr_real_x = self._dx * self.__find_r_corr_in_points(self._autocorr_real_x)\n r_corr_real_y = self._dy * self.__find_r_corr_in_points(self._autocorr_real_y)\n r_corr_imag_x = self._dx * self.__find_r_corr_in_points(self._autocorr_imag_x)\n r_corr_imag_y = self._dy * self.__find_r_corr_in_points(self._autocorr_imag_y)\n\n # Returns mean of calculated correlation radii\n return mean([r_corr_real_x, r_corr_real_y, r_corr_imag_x, r_corr_imag_y])", "def compute_corr(self):\n self.corr_ = cov_to_corr(self.cov_)\n return self", "def corr(self):\n pass", "def _pearson_correlation_coeff(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.rvalue", "def corr(arr1, arr2):\n\n\n X = []\n Y = []\n for index in range(len(arr1)):\n if arr1[index] == None or arr2[index] == None:\n continue\n X.append(arr1[index])\n Y.append(arr2[index])\n\n\n r = np.corrcoef(X, Y)[0,1]\n f = 0.5*np.log((1+r)/(1-r))\n se = 1/np.sqrt(len(X)-3)\n ucl = f + 2*se\n lcl = f - 2*se\n\n lcl = (np.exp(2*lcl) - 1) / (np.exp(2*lcl) + 1)\n ucl = (np.exp(2*ucl) - 1) / (np.exp(2*ucl) + 1)\n\n return r,lcl,ucl", "def correlation(x, y):\n return covariance(x, y) / (sd(x) * sd(y))", "def mcorr(x,y):\n return ((np.ma.dot(x,y) / (x.shape[0] - 1) / y.std(axis=0)) / x.std())", "def determine_correlation(var1,var2):\n v1 = np.array(var1)\n v2 = np.array(var2)\n mat = np.c_[(v1,v2)]# np.vstack((v1,v2)) #\n corr = np.corrcoef(mat.T)\n return corr[0][1]", "def corrcoef(self):\r\n return np.corrcoef(self.input.data)", "def calculate_feature_corr(self):\n \n return self.train_data.astype(float).corr(method='kendall')", "def cor(x, y):\n scaler = TimeSeriesScalerMeanVariance()\n x_norm = scaler.fit_transform(x)\n y_norm = scaler.fit_transform(y)\n pcc = np.mean(x_norm * y_norm) # Pearson correlation coefficients\n d = np.sqrt(2.0 * (1.0 - pcc + 1e-9)) # correlation-based similarities\n return np.sum(d)", "def _pearson_corrcoef_compute(var_x: Tensor, var_y: Tensor, corr_xy: Tensor, nb: Tensor) ->Tensor:\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)", "def _calculate_correlation(self, anomaly):\n if self.silence_level <= 1:\n print(\"Calculating partial correlation matrix at zero lag from \"\n \"anomaly values...\")\n\n # Calculate the correlation matrix, cast to float64 for precise\n # calculation of inverse matrix.\n C = 
np.corrcoef(anomaly.transpose()).astype(\"float64\")\n\n # Calculate the inverse correlation matrix\n if np.linalg.det(C) != 0.0:\n C_inv = np.linalg.inv(C)\n else:\n C_inv = np.linalg.pinv(C)\n\n # Clean up\n del C\n\n # Get the diagonal of the inverse correlation matrix\n diag = C_inv.diagonal()[:]\n\n # Calculate matrix of normalizations\n norm = np.sqrt(abs(np.outer(diag, diag)))\n\n return - C_inv / norm", "def _get_correlation(self, user1_id, user2_id):\n shared_ratings = self.get_shared_ratings(user1_id, user2_id)\n\n # Substract means for both users\n shared_ratings['rating_x'] -= self.get_mean_user_rating(user1_id)\n shared_ratings['rating_y'] -= self.get_mean_user_rating(user2_id)\n\n # Compute correlation as inverse of disparity\n disparity = (shared_ratings['rating_x'] - shared_ratings['rating_y']).abs().mean()\n return 1.0/disparity", "def test_correlation(self):\r\n x = [1, 2, 3, 5]\r\n y = [0, 0, 0, 0]\r\n z = [1, 1, 1, 1]\r\n a = [2, 4, 6, 8]\r\n b = [1.5, 1.4, 1.2, 1.1]\r\n c = [15, 10, 5, 20]\r\n\r\n bad = [1, 2, 3] # originally gave r = 1.0000000002\r\n\r\n self.assertFloatEqual(correlation(x, x), (1, 0))\r\n self.assertFloatEqual(correlation(x, y), (0, 1))\r\n self.assertFloatEqual(correlation(y, z), (0, 1))\r\n self.assertFloatEqualAbs(correlation(x, a), (0.9827076, 0.01729), 1e-5)\r\n self.assertFloatEqualAbs(\r\n correlation(x, b), (-0.9621405, 0.03786), 1e-5)\r\n self.assertFloatEqualAbs(correlation(x, c), (0.3779645, 0.622), 1e-3)\r\n self.assertEqual(correlation(bad, bad), (1, 0))", "def calculate_correlation(df, vars_to_corr, target_var) :\n\n\n mean = df[target_var].mean()\n sigma = df[target_var].std()\n\n correlation = []\n error = []\n\n for j in vars_to_corr :\n mean_j = df[j].mean()\n sigma_j = df[j].std()\n\n cov = (df[j] - mean_j) * (df[target_var] - mean) / (sigma*sigma_j)\n correlation.append(cov.mean())\n error.append(sem(cov))\n\n return correlation, error", "def correlation(data):\n return corrcoef(np.transpose(np.reshape(data, ((data.shape[0] * data.shape[1]), data.shape[2]))))", "def correlation(quantum_dict,amber_dict):\n quantum = []\n amber = []\n for key in quantum_dict:\n quantum.append(float(quantum_dict[key]))\n amber.append(float(amber_dict[key]))\n #calculation of Pearson r\n r2 = (stats.pearsonr(quantum,amber)[0])**2\n #save on a file and print it out\n r_file = open(\"correlation.dat\",\"w\")\n r_file.write(\"Correlation between quantum and amber energies:\\n\")\n r_file.write(\"%.2f\" % r2)\n r_file.close()\n print(\"Correlation between quantum and amber energies:\\n\")\n print(r2)\n return r2", "def correlation(x, y):\n stdev_x = standard_deviation(x)\n stdev_y = standard_deviation(y)\n if stdev_x > 0 and stdev_y > 0:\n return covariance(x, y) / stdev_x / stdev_y\n else:\n return 0", "def correlation(x, y):\n stdev_x = standard_deviation(x)\n stdev_y = standard_deviation(y)\n if stdev_x > 0 and stdev_y > 0:\n return covariance(x, y) / stdev_x / stdev_y\n else:\n return 0", "def get_correlation(df):\n frame_correlation = df.corr()\n return frame_correlation", "def getcorrelation(movieid1,movieid2):\n\n #the initialized integer, cosine_sum, has an initialized value of -100\n #such that in the case where correlation shouldn't be updated, the value\n #remains unchanged\n cosine_sum = NEGATIVE\n #variable r_a,i and r_b,i in the formula\n r_mv1 = 0\n r_mv2 = 0\n #numerator\n nume_sum = 0\n #two parts in the denominator (before taking square root)\n deno_mv1_sum = 0\n deno_mv2_sum = 0\n denominator = 0\n #variable that keeps track of count of 
common users\n currentCommon = 0\n\n #firstly check if the count of user passes the threshold for each movie\n if(len(dictMovie.get(movieid1))<threshold or\n len(dictMovie.get(movieid2))<threshold):\n #if either does not, returns a negative correlation (to be invalid)\n return cosine_sum\n #if both pass threshold, get the intersection (of users) of two movies\n else:\n intersect=dictMovie.get(movieid1).intersection(dictMovie.get(movieid2))\n #if the number of common users is smaller than threshold, return\n if (len(intersect) < threshold):\n return cosine_sum\n #otherwise, start counting correlation\n else:\n #get the average rating of two movies\n mv1_bar = float(dictMovieRate.get(movieid1))\n mv2_bar = float(dictMovieRate.get(movieid2))\n #iterate through common users and use formula\n for commonuser in intersect:\n #increment common user count\n currentCommon += 1\n r_mv1 = int(dictUser.get(commonuser).get(movieid1))\n r_mv2 = int(dictUser.get(commonuser).get(movieid2))\n nume_sum += ( (r_mv1)-mv1_bar )*( (r_mv2)-mv2_bar )\n deno_mv1_sum += ( (r_mv1)-mv1_bar )**2\n deno_mv2_sum += ( (r_mv2)-mv2_bar )**2\n #when done with denominator separate calculation, combine\n denominator = math.sqrt(deno_mv1_sum * deno_mv2_sum)\n #handle the case where denominator=0 (invalid)\n if denominator == 0:\n return cosine_sum\n #otherwise, successful. return valid values and pass in\n #common count to global variable for program to catch\n else:\n cosine_sum = nume_sum / denominator\n global currentCommonCount\n currentCommonCount = currentCommon\n return cosine_sum", "def computeCorr(pred_act,responses):\n\n num_pres,num_neurons = np.shape(responses)\n corr=np.zeros(num_neurons)\n \n for i in xrange(0,num_neurons):\n if np.all(pred_act[:,i]==0) & np.all(responses[:,i]==0):\n corr[i]=1.\n elif not(np.all(pred_act[:,i]==0) | np.all(responses[:,i]==0)):\n # /!\\ To prevent errors due to very low values during computation of correlation\n if abs(pred_act[:,i]).max()<1:\n pred_act[:,i]=pred_act[:,i]/abs(pred_act[:,i]).max()\n if abs(responses[:,i]).max()<1:\n responses[:,i]=responses[:,i]/abs(responses[:,i]).max() \n corr[i]=pearsonr(np.array(responses)[:,i].flatten(),np.array(pred_act)[:,i].flatten())[0]\n \n return corr", "def correlation(self) -> List[float]:\n self.pearson_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"pearson\")\n self.spearman_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"spearman\")\n return [self.pearson_corr, self.spearman_corr]", "def correlation(x_items, y_items):\r\n return correlation_test(x_items, y_items, method='pearson', tails=None,\r\n permutations=0)[:2]", "def correlation(row):\n return row['correlation']", "def los_corr(self, *args):\n\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tlonh, lath = np.deg2rad(self.heliographic(args[0]))\n\t\telse:\n\t\t\tlonh, lath = np.deg2rad(self.heliographic(args[0], args[1]))\n\n\t\tXobs = np.cos(np.deg2rad(B0))*np.cos(np.deg2rad(self.L0))\n\t\tYobs = np.cos(np.deg2rad(B0))*np.sin(np.deg2rad(self.L0))\n\t\tZobs = np.sin(np.deg2rad(B0))\n\n\t\tcorr_factor = (np.cos(lath)*np.cos(lonh)*Xobs\n\t\t\t\t\t + np.cos(lath)*np.sin(lonh)*Yobs\n\t\t\t\t\t + np.sin(lath)*Zobs)\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tself.im_corr = self.im_raw.data/corr_factor\n\t\t\treturn self.im_corr\n\t\telse:\n\t\t\treturn self.im_raw.data[args[0], args[1]]/corr_factor", "def _compute_correlations(self, data):\n mappings = self.mappings_\n n_channels, n_times = data.shape\n\n # 
get the predictions\n y_pred = data.T.dot(mappings.T)\n y_pred = y_pred.reshape((n_times, len(self.picks),\n self.n_resample), order='F')\n # pool them using median\n # XXX: weird that original implementation sorts and takes middle value.\n # Isn't really the median if n_resample even\n y_pred = np.median(y_pred, axis=-1)\n # compute correlation\n num = np.sum(data.T * y_pred, axis=0)\n denom = (np.sqrt(np.sum(data.T ** 2, axis=0)) *\n np.sqrt(np.sum(y_pred ** 2, axis=0)))\n\n corr = num / denom\n return corr", "def calculate_correlation_coefficient(column1: pd.Series, column2: pd.Series) -> np.float64:\n\n corr = column1.corr(column2)\n return corr", "def mp_corr(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif b == n or c == n:\n # only one (non-diagonal) cell is non-zero\n return -0.5\n\n return _div(2 * self.covar(), p1 * q1 + p2 * q2)", "def get_corr(self):\r\n cov = self.data.values\r\n with np.errstate(divide='ignore', invalid='ignore'):\r\n coeff = np.true_divide(1, self.get_std().values)\r\n coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN\r\n corr = np.multiply(np.multiply(cov, coeff).T, coeff)\r\n df = pd.DataFrame(\r\n corr,\r\n index=self.data.index,\r\n columns=self.data.columns,\r\n )\r\n return self.__class__(df)", "def _calc_corr(dbal, benchmark_dbal, window):\n ret = dbal.pct_change()\n benchmark_ret = benchmark_dbal.pct_change()\n corr = ret.rolling(window).corr(benchmark_ret)\n return corr", "def correlation_test(x1, x2):\r\n x = pd.DataFrame([x1, x2]).T.dropna().values\r\n return pearsonr(x[:, 0], x[:, 1])", "def calc_qcorr(self) -> Dict[int, float]:\n return self._calc_qcorr", "def correlateX(X, y, corr=\"spearman\"):\n \n X = np.array(X)\n y = np.array(y)\n ## Force... 
just in case\n \n checkX(X)\n\n if corr == \"pearson\":\n corrf = pearsonr\n elif corr == \"spearman\":\n corrf = spearmanr\n else:\n raise ValueError(\"stat was not valid.\")\n \n corrs = []\n ps = []\n for jj in range(X.shape[1]):\n r, p = corrf(X[:,jj], y)\n corrs.append(r)\n ps.append(p)\n \n return np.array(corrs), np.array(ps)", "def calc_corr(f, f_unc, g, g_unc, fac, fac_unc):\n # calculate the covariance between f and g\n # cov(f,g) = (df/dfac) * (dg/dfac) * fac_unc**2\n # in this case f=x/fac, g=y/fac\n # so cov(f,g) = f * g * fac_unc**2 / fac**2\n cov = f * g * fac_unc ** 2 / fac ** 2\n\n # calculate and return the correlation coefficient between f and g\n # corr = cov(f,g) / (f_unc * g_unc)\n return cov / (f_unc * g_unc)", "def pearsonCorrelation(x, y):\n\tsum_sq_x = 0\n\tsum_sq_y = 0\n\tsum_coproduct = 0\n\tmean_x = x[0]\n\tmean_y = y[0]\n\tif len(x) != len(y):\n\t\traise StatsError(\"Data sets are of different lengths.\")\n\tn = len(x)\n\tfor i in range(1,n):\n\t\tsweep = i / (i+1.0)\n\t\tdelta_x = x[i] - mean_x\n\t\tdelta_y = y[i] - mean_y\n\t\tsum_sq_x += delta_x * delta_x * sweep\n\t\tsum_sq_y += delta_y * delta_y * sweep\n\t\tsum_coproduct += delta_x * delta_y * sweep\n\t\tmean_x += delta_x / (i+1.0)\n\t\tmean_y += delta_y / (i+1.0)\n\tpop_sd_x = math.sqrt( sum_sq_x / n )\n\tpop_sd_y = math.sqrt( sum_sq_y / n )\n\tcov_x_y = sum_coproduct / n\n\tr = cov_x_y / (pop_sd_x * pop_sd_y)\n\tz = math.fabs(r) * math.sqrt(n) / math.sqrt(2.0)\n\tp = Prob_Z(z)\n\tif not (0.0 <= p <= 1.0):\n\t\traise StatsError(\"Invalid P-value of %r.\" % r)\n\treturn (r, p, n)", "def correlation(G, variables = [], conditionants = []):\n \n cov = covariance(G, variables = variables, \n conditionants = conditionants)\n k = cov.shape[0]\n sds = sp.Matrix([1/sp.sqrt(cov[i, i]) for i \n in range(0, k)]*k).reshape(k, k)\n \n cor = cov.multiply_elementwise(sds).multiply_elementwise(sds.T)\n return cor.applyfunc(sp.simplify)", "def auto_correlation(arr):\n return cross_correlation(arr, arr)", "def corrcoef(self):\n return self.cov / self.std / self.std[:, None]", "def corrfunc(x, y, **kws):\n r, _ = stats.pearsonr(x, y)\n ax = plt.gca()\n ax.annotate(r\"$\\rho$ = {:.2f}\".format(r),xy=(.5, .9), xycoords=ax.transAxes)", "def get_correlation(spreadsheet_mat, phenotype_response, run_parameters):\n correlation_array = np.zeros(spreadsheet_mat.shape[0])\n if 'correlation_measure' in run_parameters:\n if run_parameters['correlation_measure'] == 'pearson':\n\n spreadsheet_mat = spreadsheet_mat - spreadsheet_mat.mean(axis=1).reshape((-1, 1))\n phenotype_response = phenotype_response - phenotype_response.mean()\n spreadsheet_mat_var = np.std(spreadsheet_mat, axis=1)\n phenotype_response_var = np.std(phenotype_response)\n numerator = spreadsheet_mat.dot(phenotype_response)\n denominator = spreadsheet_mat_var * phenotype_response_var * spreadsheet_mat.shape[1]\n with np.errstate(divide='ignore', invalid='ignore'):\n correlation_array = np.true_divide(numerator, denominator)\n correlation_array[denominator==0] = 0\n\n return correlation_array\n\n if run_parameters['correlation_measure'] == 't_test':\n \n a = spreadsheet_mat[:, phenotype_response!=0]\n b = spreadsheet_mat[:, phenotype_response==0]\n d = np.mean(a, axis=1) - np.mean(b, axis=1)\n denom = np.sqrt(np.var(a, axis=1, ddof=1)/a.shape[1] + np.var(b, axis=1, ddof=1)/b.shape[1])\n with np.errstate(divide='ignore', invalid='ignore'):\n correlation_array = np.divide(d, denom)\n correlation_array[np.isnan(denom)] = 0\n correlation_array = np.abs(correlation_array)\n\n 
return correlation_array\n\n return correlation_array", "def get_correlation_coefficient(self, xdata, ydata, xsystkey, ysystkey):\n if len(set(xdata)) == 1:\n logging.warning(\n \"Parameter %s appears to not have been varied. \"\n \"i.e. all of the values in the set are the \"\n \"same. This will lead to NaN in the correlation \"\n \"calculation and so it will not be done.\"%xsystkey\n )\n if len(set(ydata)) == 1:\n logging.warning(\n \"Parameter %s appears to not have been varied. \"\n \"i.e. all of the values in the set are the \"\n \"same. This will lead to NaN in the correlation \"\n \"calculation and so it will not be done.\"%ysystkey\n )\n if (len(set(xdata)) != 1) and (len(set(ydata)) != 1):\n rho, pval = spearmanr(xdata, ydata)\n else:\n rho = np.nan\n pval = 0\n return rho, pval", "def corr_with(self, other):\n return self.data.corrwith(other)", "def correlation(self):\r\n\r\n c = np.corrcoef(self.input.data)\r\n c = c[tril_indices_from(c, -1)]\r\n\r\n return np.mean(c), stats.sem(c)", "def corr_dissim(x,y):\n \n if np.max(np.abs(x-y))==0:\n r = 0\n elif (np.sum(np.abs(x))==0) or (np.sum(np.abs(y))==0):\n r = np.nan\n else:\n r = 1-np.corrcoef(x,y)[0,1]\n \n return r", "def crosscorr(x, y, **kwargs):\r\n # just make the same computation as the crosscovariance,\r\n # but without subtracting the mean\r\n kwargs['debias'] = False\r\n rxy = crosscov(x, y, **kwargs)\r\n return rxy", "def calc_ic(data):\n return scs.spearmanr(data[:, 0], data[:, 1]).correlation", "def get_corr(xparam, yparam, x, y, xerr, yerr, cterm=None, cterm_unc=None):\n if (xparam == \"AV\" and yparam == \"NH_AV\") or (\n xparam == \"EBV\" and yparam == \"NH_EBV\"\n ):\n yfac = yerr / y\n xfac = xerr / x\n corr = -1.0 * xfac / yfac\n elif (\n xparam == \"RV\"\n and yparam == \"NH_AV\"\n and cterm is not None\n and cterm_unc is not None\n ):\n avfac = cterm_unc / cterm\n yfac = yerr / y\n corr = -1.0 * avfac / yfac\n elif xparam == \"AV\" and yparam == \"RV\":\n yfac = yerr / y\n xfac = xerr / x\n corr = xfac / yfac\n elif (\n ((xparam == \"RV\") or (xparam == \"AV\"))\n and ((yparam[0:3] == \"CAV\") or (yparam == \"bump_area\"))\n and cterm is not None\n and cterm_unc is not None\n ):\n avfac = cterm_unc / cterm\n yfac = yerr / y\n corr = -1.0 * avfac / yfac\n elif (\n ((xparam == \"RV\") or (xparam == \"EBV\"))\n and (yparam[0:1] == \"C\")\n and cterm is not None\n and cterm_unc is not None\n ):\n ebvfac = cterm_unc / cterm\n yfac = yerr / y\n corr = ebvfac / yfac\n else:\n corr = np.full(len(x), 0.0)\n\n return corr", "def corr(x, y, pX, pY, pxy):\n return cov(x, y, pxy) / (mystd(x, pX) * mystd(y, pY))", "def correlation(x, y):\n # Use data in natural key-value form\n xs = {}\n for (_, date, value) in x:\n xs[date] = value\n ys = {}\n for (_, date, value) in y:\n ys[date] = value\n\n # Fill 0s for missing dates\n for d in set(ys.keys()) - set(xs.keys()):\n xs[d] = 0\n for d in set(xs.keys()) - set(ys.keys()):\n ys[d] = 0\n\n x_avg = sum(xs.values()) / len(xs.values())\n y_avg = sum(ys.values()) / len(ys.values())\n\n # Pearson correlation coefficient for given sample\n covariance = 0\n x_variance = 0\n y_variance = 0\n for d in xs.keys():\n x_diff = xs[d] - x_avg\n y_diff = ys[d] - y_avg\n covariance += x_diff * y_diff\n x_variance += math.pow(x_diff, 2)\n y_variance += math.pow(y_diff, 2)\n if x_variance == 0:\n return -1\n elif y_variance == 0:\n return -2\n return covariance / (math.sqrt(x_variance) * math.sqrt(y_variance))", "def correlation(data, method, caption):\n columns = list(data)\n coefficients = 
data.astype(float).corr(method=method)\n results = []\n for i in range(len(columns)):\n for j in range(i + 1, len(columns)):\n coefficient = coefficients[columns[i]][columns[j]]\n results.append((\n abs(coefficient), coefficient,\n columns[i] + ' x ' + columns[j]))\n print('# ' + caption + ', ' + method)\n for result in reversed(sorted(results)):\n abs_coefficient, coefficient, columns_pair = result\n print (coefficient, columns_pair)", "def correlation(self):\n\n c = np.corrcoef(self.input.data)\n c = c[tril_indices_from(c, -1)]\n\n return np.mean(c), stats.sem(c)", "def matthews_corr(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif b == n or c == n:\n # only one (non-diagonal) cell is non-zero\n return -0.5\n elif p1 == n or p2 == n or q1 == n or q2 == n:\n # one row or column is zero, another non-zero\n return 0.0\n\n return _div(self.covar(), sqrt(p1 * q1 * p2 * q2))", "def _compute_corr(fmap):\n fmap = fmap.view(fmap.size(0), fmap.size(1), -1)\n fmap = nn.functional.normalize(fmap, dim=2, eps=1e-08)\n corr = torch.bmm(fmap.permute(0, 2, 1), fmap)\n return corr.view(corr.size(0), -1)", "def _GetCorrelationFunction(Ri='S',Rj='D',AAP=[_Hydrophobicity,_hydrophilicity,_residuemass]):\n\tHydrophobicity=NormalizeEachAAP(AAP[0])\n\thydrophilicity=NormalizeEachAAP(AAP[1])\n\tresiduemass=NormalizeEachAAP(AAP[2])\n\ttheta1=math.pow(Hydrophobicity[Ri]-Hydrophobicity[Rj],2)\n\ttheta2=math.pow(hydrophilicity[Ri]-hydrophilicity[Rj],2)\n\ttheta3=math.pow(residuemass[Ri]-residuemass[Rj],2)\n\ttheta=round((theta1+theta2+theta3)/3.0,3)\n\treturn theta", "def correlation(x: pd.Series, y: pd.Series, d: int) -> pd.Series:\n # todo multiindex\n if isinstance(d, float):\n d = math.floor(d)\n if isinstance(x.index, pd.MultiIndex):\n x.name = 'x'\n y.name = 'y'\n j = x.to_frame().join(y.to_frame())\n res = []\n\n for g in j.groupby(level=1):\n\n a = g[1][x.name]\n b = g[1][y.name]\n r = a.rolling(window=d).corr(b)\n res.append(r)\n return pd.concat(res).reorder_levels([1, 0])\n else:\n return x.rolling(window=d).corr(y)", "def GetCorrelationFunction(Ri='S',Rj='D',AAP=[]):\n\tNumAAP=len(AAP)\n\ttheta=0.0\n\tfor i in range(NumAAP):\n\t\ttemp=NormalizeEachAAP(AAP[i])\n\t\ttheta=theta+math.pow(temp[Ri]-temp[Rj],2)\n\tresult=round(theta/NumAAP,3)\n\treturn result", "def correlation(C):\n if not isinstance(C, np.ndarray):\n raise TypeError(\"C must be a numpy.ndarray\")\n shape = C.shape\n if (len(shape) != 2) or shape[0] != shape[1]:\n raise ValueError(\"C must be a 2D square matrix\")\n\n diagonal = np.diag(C)\n\n # standard deviation\n std = np.sqrt(np.expand_dims(diagonal, axis=0))\n\n correlation = C / np.matmul(std.T, std)\n\n return correlation", "def crosscorr(datax, datay, lag=0):\n return datax.corr(datay.shift(lag))", "def calculate_corr(genotype_t, phenotype_t, residualizer=None, return_var=False):\n\n # residualize\n if residualizer is not None:\n genotype_res_t = residualizer.transform(genotype_t) # variants x samples\n phenotype_res_t = residualizer.transform(phenotype_t) # phenotypes x samples\n else:\n genotype_res_t = genotype_t\n phenotype_res_t = phenotype_t\n\n if return_var:\n genotype_var_t = genotype_res_t.var(1)\n phenotype_var_t = phenotype_res_t.var(1)\n\n # center and normalize\n genotype_res_t = center_normalize(genotype_res_t, dim=1)\n phenotype_res_t = center_normalize(phenotype_res_t, dim=1)\n\n # correlation\n if 
return_var:\n return torch.mm(genotype_res_t, phenotype_res_t.t()), genotype_var_t, phenotype_var_t\n else:\n return torch.mm(genotype_res_t, phenotype_res_t.t())", "def concordance_correlation_coefficient(y_true, y_pred,\n sample_weight=None,\n multioutput='uniform_average'):\n cor=np.corrcoef(y_true,y_pred)[0][1]\n \n mean_true=np.mean(y_true)\n mean_pred=np.mean(y_pred)\n \n var_true=np.var(y_true)\n var_pred=np.var(y_pred)\n \n sd_true=np.std(y_true)\n sd_pred=np.std(y_pred)\n \n numerator=2*cor*sd_true*sd_pred\n \n denominator=var_true+var_pred+(mean_true-mean_pred)**2\n\n return numerator/denominator", "def custom_corrcoef(X, Y=None):\n if Y is None:\n Y = X\n \n if X.shape[0] != Y.shape[0]:\n raise Exception(\"X and Y must have the same number of rows.\")\n \n X = X.astype(float)\n Y = Y.astype(float)\n \n X -= X.mean(axis=0)[np.newaxis,...]\n Y -= Y.mean(axis=0)\n \n xx = np.sum(X**2, axis=0)\n yy = np.sum(Y**2, axis=0)\n \n r = np.dot(X.T, Y)/np.sqrt(np.multiply.outer(xx,yy))\n \n return r", "def _update_correlation(self, clean_samples, prev_dependent_properties):\n batch_properties = self._get_correlation_dependent_properties(clean_samples)\n batch_corr = self._get_correlation(clean_samples, batch_properties)\n\n self.correlation_matrix = self._merge_correlation_helper(\n self.correlation_matrix, prev_dependent_properties[\"mean\"],\n prev_dependent_properties[\"std\"], self.total_samples - self.row_is_null_count,\n batch_corr, batch_properties[\"mean\"],\n batch_properties[\"std\"], batch_properties['count'])", "def EstimatedAutocorr(fw, data, pnum, trialnum, marker1, marker2): \n cycle_start = HeelStrike(fw, data, pnum, trialnum, marker1, marker2)\n x = cycle_start[2] \n time = cycle_start[1]\n drop_NA = np.vstack((x, time))\n #print drop_NA.shape, x.shape, y.shape\n drop_NA = drop_NA.T\n x = drop_NA[:,0]\n #x = x[~np.isnan(x).any()]\n \n #n = len(x)\n #var = np.var(x)\n tao = np.correlate(x, x, mode='full')\n # assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))\n #result = r/(var*(np.arange(n, 0, -1)))\n plt.figure(4)\n plt.plot(tao)\n return tao", "def ncor(x, y):\n d = np.correlate(x, x) * np.correlate(y, y)\n if d <= 0:\n return 0\n return np.correlate(x, y) / d ** 0.5", "def correlation_analysis():\n\n raw_covid_data = read_covid_data()\n\n pop_data = read_population()\n\n life_expectancy_data = read_life_expectancy()\n\n gdp_data = read_gdp()\n\n edu_data = read_education()\n\n int_data = read_internet()\n\n covid_joined = pd.merge(raw_covid_data, pop_data, on=\"Country\")\n\n covid_joined.insert(4, \"Confirmed rate\", covid_joined[\"Confirmed\"] / covid_joined[\"Population\"])\n covid_joined.insert(5, \"Death rate\", covid_joined[\"Death\"] / covid_joined[\"Population\"])\n\n covid_life_joined = pd.merge(covid_joined, life_expectancy_data, on=\"Country\")\n covid_life_gdp_joined = pd.merge(covid_life_joined, gdp_data, on=\"Country\")\n covid_life_gdp_edu_joined = pd.merge(covid_life_gdp_joined, edu_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = pd.merge(covid_life_gdp_edu_joined, int_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Education != '..']\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Internet != '..']\n covid_life_gdp_edu_int_joined['Education'] = covid_life_gdp_edu_int_joined['Education'].astype(float)\n covid_life_gdp_edu_int_joined['Internet'] = covid_life_gdp_edu_int_joined['Internet'].astype(float)\n\n 
sns.set()\n\n draw_histogram(covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"COVID-19 Confirmed rate\")\n draw_histogram(covid_life_gdp_edu_int_joined[\"Death rate\"], \"COVID-19 Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Life expectancy\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Life expectancy\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"GDP\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"GDP\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Education\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Education\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Internet\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Internet\", \"Death rate\")", "def get_corr(self):\n return self.corr_matrix, self.corr_signature", "def corr_coeff(self) -> float:\n correlation_coefficient = np.corrcoef(self.true, self.predicted)[0, 1]\n return float(correlation_coefficient)", "def _xcorrf(self, profile1, profile2, dx):\n corrf = np.correlate(profile2, profile1, mode = 'same') \\\n /np.sum(profile1**2)\n\n if np.isnan(corrf).any():\n displ = np.nan\n corr = 0\n else:\n displ = (np.where(corrf == np.max(corrf))[0][0] - len(corrf)//2)*dx\n corr = np.max(corrf)\n\n return displ, corr", "def _c_correlation(cls, X, y):\n su = np.zeros(X.shape[1])\n for i in np.arange(X.shape[1]):\n su[i] = cls._symmetrical_uncertainty(X[:, i], y)\n return su", "def calcDispCorrandR(aaconst, aacorr, caliConst, AAdata, outName):\n k=caliConst[3]\n n=caliConst[2]\n #Be sure to change this appropriately to the fixed dye conc\n x=k*((fixed_dye_conc-n)/55.5)\n n = n\n print(aaconst)\n perr2=np.sqrt(np.diag(aacorr))\n print(perr2)\n corrmat2=np.zeros([len(aaconst),len(aaconst)])\n for i in range(len(aacorr)):\n for j in range(len(aacorr)):\n ele=aacorr[i,j]\n diele=ele/(perr2[i]*perr2[j])\n corrmat2[i,j]=round(diele,3)\n print(corrmat2)\n #calculate the r^2 value\n AAss_res = 0\n AAss_total = 0\n residuals = np.zeros([len(AAdata[:,0]), 1])\n for i in range(len(AAdata[:,0])):\n residuals[i] = (DispCurve(AAdata[i,0],x,aaconst[0],aaconst[1],n,aaconst[2]) - AAdata[i,1])\n AAss_res += np.square(residuals[i])\n AAss_total += np.square((AAdata[i,1] - np.average(AAdata[:,1])))\n print(AAss_res)\n print(AAss_total)\n AAr_sq = 1 - (AAss_res/AAss_total)\n print(AAr_sq)\n #write out the fit results\n f = open(outName + \"_disp_constants.txt\", 'w')\n f.write(\"B\\ta\\tN\\tK\\n\")\n for i in range(len(aaconst)):\n f.write('%.9f' %aaconst[i] + \"\\t\")\n f.write(\"\\n\")\n for i in range(len(aacorr)):\n f.write('%.9f' %perr2[i] + \"\\t\")\n f.write(\"\\n\\n\")\n f.write(\"Correlation matrix :\\n\\n\")\n for i in range(len(aacorr)):\n for j in range(len(aacorr)):\n f.write('%.9f' %corrmat2[i,j]+'\\t')\n f.write(\"\\n\\n\")\n f.write(\"R^2 value : \\t\" + '%.9f' %AAr_sq)\n f.close()", 
"def crosscorr(datax, datay, lag=0):\n return datax.corr(datay.shift(lag))", "def corr(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.corr)(self, **kwargs)", "def correlation(self) -> int:\n return self._correlation", "def _calculate_cc(self, array, corr_range, tau_max, lag_mode):\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus INCLUDING the last tau value\n for t in range(2*tau_max+1):\n\n # here the actual cross correlation is calculated\n crossij = (array[tau_max, i, :] * array[t, j, :]).mean()\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = crossij\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += numpy.abs(crossij)\n if t >= tau_max:\n corrmat[0, i, j] += numpy.abs(crossij)\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if numpy.abs(crossij) > maxcross:\n maxcross = numpy.abs(crossij)\n argmax = t\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax - tau_max\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n elif lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat", "def get_correlation(R, b_to_rate):\n X = []\n Y = []\n v_to_source = Ftree.R_to_v_to_source(R)\n for p, v in R:\n gp = v_to_source.get(p, None)\n if gp is not None:\n X.append(b_to_rate[frozenset([gp, p])])\n Y.append(b_to_rate[frozenset([p, v])])\n xbar = sum(X) / len(X)\n ybar = sum(Y) / len(Y)\n xvar = sum((x - xbar)**2 for x in X) / (len(X) - 1)\n yvar = sum((y - ybar)**2 for y in Y) / (len(Y) - 1)\n xstd = math.sqrt(xvar)\n ystd = math.sqrt(yvar)\n xycorr_num = sum((x - xbar) * (y - ybar) for x, y in zip(X, Y))\n xycorr_den = xstd * ystd * len(zip(X, Y))\n xycorr = xycorr_num / xycorr_den\n return xycorr", "def property_correlation(self, property_1, property_2):\n\n\t\tself.property_existence([property_1, property_2])\n\t\tcorrelation = self.df[property_1].corr(self.df[property_2])\n\n\t\treturn correlation", "def corr(A,B):\n\n # Rowwise mean of input arrays & subtract from input arrays themeselves\n A_mA = A - A.mean(1)[:,None]\n B_mB = B - B.mean(1)[:,None]\n\n # Sum of squares across rows\n ssA = (A_mA**2).sum(1);\n ssB = (B_mB**2).sum(1);\n\n # Finally get corr coeff\n return np.dot(A_mA,B_mB.T)/np.sqrt(np.dot(ssA[:,None],ssB[None]))", "def pearson_r(x, y):\r\n # Compute correlation matrix: corr_mat\r\n \r\n corr_mat=np.corrcoef(x,y)\r\n\r\n # Return entry [0,1]\r\n return corr_mat[0,1]", "def calcCaliCorrandR(constants, corr, data, outName):\n print(constants)\n perr=np.sqrt(np.diag(corr))\n print(perr)\n corrmat=np.zeros([len(constants),len(constants)])\n for i in range(len(corr)):\n for j in range(len(corr)):\n \n ele=corr[i,j]\n diele=ele/(perr[i]*perr[j])\n corrmat[i,j]=round(diele,3)\n print(corrmat)\n 
#calculate the r^2 value\n ss_res = 0\n ss_total = 0\n residuals = np.zeros([len(data[:,0]), 1])\n for i in range(len(data[:,0])):\n residuals[i] = (LangmuirCurve(data[i,0],constants[0],constants[1],constants[2],constants[3]) - data[i,1])\n ss_res += np.square(residuals[i])\n ss_total += np.square((data[i,1] - np.average(data[:,1])))\n print(ss_res)\n print(ss_total)\n r_sq = 1 - (ss_res/ss_total)\n print(r_sq)\n #write out the fit results\n f = open(outName + \"_cali_constants.txt\", 'w')\n f.write(\"B\\ta\\tN\\tK\\n\")\n for i in range(len(constants)):\n f.write('%.9f' %constants[i] + \"\\t\")\n f.write(\"\\n\\n\")\n for i in range(len(corr)):\n f.write('%.9f' %perr[i] + \"\\t\")\n f.write(\"\\n\\n\")\n f.write(\"Correlation matrix :\\n\\n\")\n for i in range(len(corr)):\n for j in range(len(corr)):\n f.write('%.9f' %corrmat[i,j]+'\\t')\n f.write(\"\\n\\n\")\n f.write(\"R^2 value : \\t\" + '%.9f' %r_sq)\n f.close()", "def coupling_coef_corrs(fits_path, dataset1, dataset2):\n fits = h5py.File(fits_path, 'r')\n coefs1 = np.median(fits[dataset1]['coupling_coefs'][:], axis=0)\n coefs2 = np.median(fits[dataset2]['coupling_coefs'][:], axis=0)\n\n n_neurons = coefs1.shape[0]\n corrs = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n corrs[neuron] = np.corrcoef(coefs1[neuron], coefs2[neuron])[0, 1]\n\n return corrs", "def find_correlation(convergence_data, radii, plot_correlation=False, plot_radii=False, fis=False, mu_diff=None,\n impact=False):\n correlations = []\n correlation_errs = []\n for cone_radius in radii:\n if fis or impact:\n pickle_in = open(\"MICE_SN_data_fis.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n if mu_diff is None:\n mu_diff = SN_data[f\"Radius{str(cone_radius)}\"][\"mu_diff\"]\n conv = np.array(convergence_data[f\"Radius{str(cone_radius)}\"][\"SNkappa\"])\n else:\n pickle_in = open(\"MICE_SN_data.pickle\", \"rb\")\n SN_data = pickle.load(pickle_in)\n # redshift_cut = [SN_data['SNZ'] > 0.2]\n if mu_diff is None:\n mu_diff = SN_data[\"mu_diff\"]\n conv = np.array(convergence_data[f\"Radius{str(cone_radius)}\"][\"SNkappa\"])\n\n conv_rank = rankdata(conv)\n mu_rank = rankdata(mu_diff)\n # print(mu_diff)\n diff = np.abs(conv_rank - mu_rank)\n rho = 1 - 6 / (len(conv) * (len(conv) ** 2 - 1)) * np.sum(diff ** 2)\n rho_err = np.sqrt((1 - rho ** 2) / (len(conv) - 1))\n correlations.append(rho)\n correlation_errs.append(rho_err)\n\n if plot_correlation:\n edges = np.linspace(-0.0065, 0.011, 6)\n bins = (edges[1:] + edges[:-1]) / 2\n mean_dmu = []\n standard_error = []\n for bin in bins:\n dmus = []\n for kappa, dmu in zip(conv, mu_diff):\n if bin - 0.007 / 4 < kappa <= bin + 0.0007 / 4:\n dmus.append(dmu)\n mean_dmu.append(np.mean(dmus))\n standard_error.append(np.std(dmus) / np.sqrt(len(dmus)))\n\n plt.plot([min(conv), max(conv)], [0, 0], color=grey, linestyle='--')\n plt.plot(conv, mu_diff, linestyle='', marker='o', markersize=2, color=colours[0])\n # plt.plot(conv, fit, color=colours[1], label=f'$\\Delta\\mu = {round(float(grad),3)}\\kappa$')\n plt.errorbar(bins, mean_dmu, standard_error, marker='s', color='r', markersize=3, capsize=3, linestyle='')\n plt.xlabel('$\\kappa$')\n plt.ylabel('$\\Delta\\mu$')\n plt.xlim([-0.008, 0.011])\n plt.legend(frameon=0, loc='lower right')\n plt.ylim([-0.3, 0.3])\n plt.text(0.0038, -0.19, f'$\\\\rho$ = {round(rho, 3)} $\\pm$ {round(rho_err, 3)}', fontsize=16)\n plt.show()\n\n if plot_radii:\n u_err = [correlations[i] + correlation_errs[i] for i in range(len(correlations))]\n d_err = [correlations[i] - correlation_errs[i] for 
i in range(len(correlations))]\n smooth_corr = savgol_filter([correlations[i] for i in range(len(correlations))], 11, 4)\n smooth_u_err = savgol_filter(u_err, 11, 4)\n smooth_d_err = savgol_filter(d_err, 11, 4)\n plt.plot([0, 30], [0, 0], color=grey, linestyle='--')\n plt.plot(radii, smooth_corr, color=colours[0])\n plt.plot(radii, [correlations[i] for i in range(len(correlations))], marker='x', color=colours[1],\n linestyle='')\n plt.fill_between(radii, smooth_u_err, smooth_d_err, color=colours[0], alpha=0.4)\n\n plt.xlabel('Cone Radius (arcmin)')\n plt.ylabel(\"Spearman's Rank Coefficient\")\n plt.gca().invert_yaxis()\n plt.show()\n return [correlations, smooth_corr, smooth_u_err, smooth_d_err, np.array(u_err) - np.array(correlations)]\n\n return correlations, correlation_errs", "def test_correlation_test_perfect_correlation(self):\r\n # These results were verified with R.\r\n obs = correlation_test([1, 2, 3, 4], [1, 2, 3, 4])\r\n self.assertFloatEqual(obs[:2],\r\n (0.99999999999999978, 2.2204460492503131e-16))\r\n self.assertEqual(len(obs[2]), 999)\r\n for r in obs[2]:\r\n self.assertTrue(r >= -1.0 and r <= 1.0)\r\n self.assertCorrectPValue(0.06, 0.09, correlation_test,\r\n ([1, 2, 3, 4], [1, 2, 3, 4]), p_val_idx=3)\r\n self.assertFloatEqual(obs[4], (0.99999999999998879, 1.0))", "def base_corr(self, cutoff=0.3, show=0):\n\tn = self.data_points\n\tlast_points = int(cutoff*n)\n\tfor i in range(2):\n\t self.the_result.y[i] = self.the_result.y[i] - self.the_result.y[i][:-last_points].mean()\n\tif show == 1 :\n\t return self.the_result\n\treturn self", "def vcor(x, y):\n\n x = x - x.mean(1)[:, None]\n x /= x.std(1)[:, None]\n y = y - y.mean(1)[:, None]\n y /= y.std(1)[:, None]\n\n r_est = (x * y).mean(1)\n\n return r_est", "def test_distance_correlation_fast(self):\n arr1 = np.array(((1,), (2,), (3,), (4,), (5,), (6,)))\n arr2 = np.array(((1,), (7,), (5,), (5,), (6,), (2,)))\n\n covariance = dcor_internals._u_distance_covariance_sqr_fast(\n arr1, arr2)\n self.assertAlmostEqual(covariance, -0.88889, places=5)\n\n correlation = dcor_internals._u_distance_correlation_sqr_fast(\n arr1, arr2)\n self.assertAlmostEqual(correlation, -0.41613, places=5)\n\n covariance = dcor_internals._u_distance_covariance_sqr_fast(\n arr1, arr1)\n self.assertAlmostEqual(covariance, 1.5556, places=4)\n\n correlation = dcor_internals._u_distance_correlation_sqr_fast(\n arr1, arr1)\n self.assertAlmostEqual(correlation, 1, places=5)", "def CORR(A: pd.DataFrame, B: pd.DataFrame, n) -> pd.DataFrame:\r\n A = A.unstack()\r\n B = B.unstack()\r\n res = A.rolling(n).corr(B)\r\n return res.stack()", "def _get_correlation(self, clean_samples, batch_properties):\n columns = self.options.correlation.columns\n clean_column_ids = []\n if columns is None:\n for idx in range(len(self._profile)):\n data_type = self._profile[idx].\\\n profiles[\"data_type_profile\"].selected_data_type\n if data_type not in [\"int\", \"float\"]:\n clean_samples.pop(idx)\n else:\n clean_column_ids.append(idx)\n\n data = pd.DataFrame(clean_samples).apply(pd.to_numeric, errors='coerce')\n means = {index:mean for index, mean in enumerate(batch_properties['mean'])}\n data = data.fillna(value=means)\n\n # Update the counts/std if needed (i.e. 
if null rows or exist)\n if (len(data) != batch_properties['count']).any():\n adjusted_stds = np.sqrt(\n batch_properties['std']**2 * (batch_properties['count'] - 1) \\\n / (len(data) - 1)\n )\n batch_properties['std'] = adjusted_stds\n # Set count key to a single number now that everything's been adjusted\n batch_properties['count'] = len(data)\n\n # fill correlation matrix with nan initially\n n_cols = len(self._profile)\n corr_mat = np.full((n_cols, n_cols), np.nan)\n\n # then, fill in the correlations for valid columns\n rows = [[id] for id in clean_column_ids]\n corr_mat[rows, clean_column_ids] = np.corrcoef(data, rowvar=False)\n\n return corr_mat", "def CORREL(list1, list2):\n list1 = np.array(list1)\n list2 = np.array(list2)\n try:\n return(np.corrcoef(list1, list2)[0,1])\n except:\n print('Invalid list objects: have you passed int or numeric list objects of same length?')", "def corr_score(file1,file2,delta,bin=1.,dur=100.,ncell=500):\r\n\td1 = numpy.loadtxt(file1)\r\n\td2 = numpy.loadtxt(file2)\r\n\tx = numpy.zeros(int(ncell*dur/bin))\r\n\ty = numpy.zeros(int(ncell*dur/bin))\r\n\tfor j in range(ncell):\r\n\t\tif d1.size == 2:\r\n\t\t\ts1 = numpy.array(d1[0]*(d1[1]==j))\r\n\t\telse:\r\n\t\t\ts1 = d1[d1[:,1]==j,0]\r\n\t\tif d2.size == 2:\r\n\t\t\ts2 = numpy.array(d2[0]*(d2[1]==j))\r\n\t\telse:\r\n\t\t\ts2 = d2[d2[:,1]==j,0]\r\n\t\tkern = numpy.append(numpy.arange(delta/bin),numpy.arange(delta/bin,-1,-1))\r\n\t\tts1,dump = pylab.histogram(s1,numpy.arange(0.,dur+bin,bin))\r\n\t\tts2,dump = pylab.histogram(s2,numpy.arange(0.,dur+bin,bin))\r\n\t\tx[j*dur/bin:(j+1)*dur/bin] = numpy.convolve(ts1,kern,'same')\r\n\t\ty[j*dur/bin:(j+1)*dur/bin] = numpy.convolve(ts2,kern,'same')\r\n x = x - pylab.mean(x)\r\n y = y - pylab.mean(y)\r\n cor = sum(x*y)/(len(x)*pylab.std(x)*pylab.std(y))\r\n return cor", "def _pearson_corrcoef_update(preds: Tensor, target: Tensor, mean_x: Tensor, mean_y: Tensor, var_x: Tensor, var_y: Tensor, corr_xy: Tensor, n_prior: Tensor, num_outputs: int) ->Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n n_obs = preds.shape[0]\n mx_new = (n_prior * mean_x + preds.mean(0) * n_obs) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.mean(0) * n_obs) / (n_prior + n_obs)\n n_prior += n_obs\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior", "def correlation(array, maxtau = 200, step_type = \"tau\", flag_normalize = True):\n\n array = array - numpy.mean(array)\n \n array2 = numpy.copy(array)\n \n c = numpy.zeros(maxtau)\n\n for i in range(0, maxtau):\n \n array2 = numpy.roll(array2, -1)\n \n if step_type == \"tau\":\n step = i+1\n elif step_type == \"1\":\n step = 1\n else: \n print(\"croc.Functions (correlation): step_type is not recognized, will use 'tau'\")\n step = i+1\n\n a = list(itertools.islice(array * array2, None, len(array)-i-1, step))\n \n c[i] = numpy.sum(a) / len(a)\n \n if flag_normalize:\n return c/c[0]\n else:\n return c", "def coupling_coef_corrs(coupling_coefs1, coupling_coefs2, correlation='pearson'):\n n_neurons = coupling_coefs1.shape[0]\n correlations = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n ccs1 = coupling_coefs1[neuron]\n ccs2 = coupling_coefs2[neuron]\n\n if np.array_equal(ccs1, ccs2):\n correlations[neuron] = 1.\n 
elif np.all(ccs1 == 0) or np.all(ccs2 == 0):\n correlations[neuron] = 0\n else:\n if correlation == 'pearson':\n correlations[neuron] = np.corrcoef(ccs1, ccs2)[0, 1]\n elif correlation == 'spearman':\n correlations[neuron] = spearmanr(ccs1, ccs2).correlation\n elif correlation == 'cosine':\n correlations[neuron] = cosine_similarity(ccs1, ccs2)\n\n return correlations", "def image_correlation(image1, image2):\n im1=im_to_coord(image1)\n im2=im_to_coord(image2)\n z1=im1[:,2]\n z2=im2[:,2]\n mu_z1 = z1.mean()\n mu_z2 = z2.mean()\n n = z1.shape[0]\n s_z1 = z1.std(0, ddof=n - 1)\n s_z2 = z2.std(0, ddof=n - 1)\n cov = np.dot(z1,\n z2.T) - n * np.dot(mu_z1,\n mu_z2)\n return cov / np.dot(s_z1, s_z2)" ]
[ "0.79405046", "0.75999177", "0.7460216", "0.7436758", "0.7395434", "0.7368859", "0.7285379", "0.7255133", "0.72522086", "0.7240516", "0.7132481", "0.71304506", "0.71147674", "0.7075051", "0.7072566", "0.70674735", "0.7047367", "0.70030606", "0.7002301", "0.69837224", "0.69723094", "0.6950531", "0.6949453", "0.69350356", "0.69350356", "0.68887043", "0.68824023", "0.6880477", "0.6872606", "0.68641037", "0.6821748", "0.678535", "0.67841715", "0.6751689", "0.6746694", "0.67310405", "0.67164516", "0.6710817", "0.6705639", "0.6697663", "0.6687052", "0.66869694", "0.6679085", "0.66708004", "0.66707134", "0.6669825", "0.6668272", "0.66521543", "0.6642643", "0.6638217", "0.66371936", "0.663247", "0.6615929", "0.6612309", "0.6606044", "0.6594311", "0.6568157", "0.65650254", "0.65632874", "0.6552114", "0.6547963", "0.65439934", "0.6532511", "0.6519838", "0.65163344", "0.650717", "0.65070677", "0.6504305", "0.6499165", "0.6498716", "0.64951044", "0.64919174", "0.64899534", "0.64893883", "0.6480168", "0.64687335", "0.64660925", "0.64591175", "0.6455065", "0.64527977", "0.64526165", "0.64470553", "0.643842", "0.6437412", "0.64236623", "0.6421857", "0.64165676", "0.6416", "0.6407923", "0.64020973", "0.6396055", "0.63911504", "0.63866395", "0.6378197", "0.6374905", "0.63693076", "0.6361447", "0.6357357", "0.6357155", "0.6345497" ]
0.66404015
49
Creates a new user and adds it to the database. Returns user instance
def register(cls, username, email, password):
    hashed_password = bcrypt.generate_password_hash(password).decode("UTF-8")
    user = User(username=username, email=email, password=hashed_password)
    db.session.add(user)
    return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('Username is required.')\n if not email:\n raise ValueError('Email is required.')\n if not password:\n raise ValueError('Password is required.')\n try:\n with transaction.atomic():\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise", "def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='test_email@example.com', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def create_user(self):\n return UserFactory.create()", "def signup(cls, username, password):\n\n hashed = bcrypt.generate_password_hash(password).decode(\"utf8\")\n user = User(\n username=username,\n password=hashed\n )\n db.session.add(user)\n\n return user", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n 
last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def signup(cls, username, first_name, last_name, email, password):\n\n hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')\n\n user = User(\n username=username,\n first_name=first_name,\n last_name=last_name,\n email=email,\n password=hashed_pwd,\n )\n\n db.session.add(user)\n return user", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def create(cls, name, username, email, password):\n new_user = cls(name=name,\n username=username,\n email=email\n )\n new_user.password = bcrypt.generate_password_hash(\n password).decode('utf-8')\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.password = make_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_new_user(user: User, session: Session) -> User:\n try:\n user_query = session.query(User).filter(User.username == user.username).first()\n if user_query is None:\n session.add(user) # Add the user\n session.commit() # Commit the change\n LOGGER.success(f\"Created user: {user}\")\n LOGGER.warning(f\"User already exists in database: {user}\")\n return user\n except IntegrityError as e:\n LOGGER.error(e.orig)", "def _create_user(self, email, password, first_name, last_name, **extra_fields):\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n last_login=now,\n date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, password, **extra_kwargs):\n if not username:\n raise ValueError(\"User must have an username\")\n\n user = self.model(username=username, **extra_kwargs)\n user.set_password(password)\n\n user.save(using=self._db)\n\n return user", "def _create_user(self, email, password, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n print(\"create user\")\n return user", "def create_user(email, password):\n\n user = User(email=email, password=password)\n \n db.session.add(user)\n db.session.commit()\n\n return user", "def orm_create_user(session: Session, user: User) -> User:\n try:\n session.add(user) # Add the user\n session.commit() # Commit the change\n LOGGER.success(f\"Created new user: 
{user}\")\n return user\n except IntegrityError as e:\n LOGGER.error(e.orig)\n raise e.orig\n except SQLAlchemyError as e:\n LOGGER.error(f\"Unexpected error when creating user: {e}\")\n raise e", "def _create_user(self, username, name,\n email, password, **extra_fields):\n if not email:\n raise ValueError('Email field is required')\n email = self.normalize_email(email)\n user = self.model(\n username=username,\n name=name,\n email=email,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def _create_user(self, password, **extra_fields):\n try:\n user = self.model(**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise ValueError('ValueError: Cannot create new user')", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n db.session.add(settings)\n db.session.commit()", "def create_a_user(self, username='fry', email='fry@futur.ama', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def create(self, validated_data):\n user = get_user_model().objects.create(\n username=validated_data['username'],\n )\n user.set_password(validated_data['password'])\n user.save()\n return user", "def _create_user(self, username, email, password):\n\t\tnow = datetime.now()\n\t\tif username is None:\n\t\t\traise ValueError('Must include username')\n\t\tif email is None:\n\t\t\traise ValueError('Must include email')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(\n\t\t\temail=self.normalize_email(email),\n\t\t\tusername=username,\n\t\t\tdate_joined=now\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create(self, validated_data):\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(\n username, email, password, **validated_data)\n return user", "def _create_user(self, username, 
email, password, **extra_fields):\n if not email:\n raise ValueError('The email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n validate_email(email)\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password=None, **extra_fields):\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email=None, password=None, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, username=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_new_user():\n return get_user_model().objects.create_user(\n email='test@gmail.com',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_user(email, password, fname, lname):\n\n user = User(email=email, password=password, fname=fname, lname=lname)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, username, email, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff,\n is_superuser=is_superuser,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None,commit=True):\n\n\n user = self.model(\n email=self.normalize_email(email),\n username = username\n )\n\n user.set_password(password)\n if commit:\n user.save(using=self._db)\n\n return user", "def create_user(username, password):\n\n user = User(username=username, password=password)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, email, first_name, last_name, password, **extra_fields):\n if not email:\n raise ValueError(_('Email Address is required'))\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n **extra_fields\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email: str, password: str, **extra):\n try:\n user = self.model(email=self.normalize_email(email),\n **extra)\n user.set_password(password)\n user.save(using=self._db)\n except IntegrityError as Ex:\n raise IntegrityError(\"Duplicate\")\n return user", "def create_user(self, username, password=None, **extra_fields):\n user = self.model(username=username, **extra_fields)\n 
user.set_password(password)\n user.save()\n\n return user", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, username, email, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n if not email:\n raise ValueError('The given email must must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, username, email, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, username, email, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(fname, lname, email, username, password, category, country):\n try:\n user = User(fname=fname,\n lname=lname,\n email=email,\n username=username,\n password=password,\n preferred_category_id=category,\n preferred_country_id=country)\n\n db.session.add(user)\n db.session.commit()\n return user\n\n except IntegrityError:\n db.session.rollback()\n return None", "def create(self, validated_data):\n user = User.objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n )\n return user", "def _create_user(self, username, first_name, last_name, email, password, is_staff, is_superuser,\n **extra_fields):\n\n now = timezone.now()\n\n if not email:\n raise ValueError('Email is Required!')\n if not username:\n raise ValueError('Username is Required!')\n\n email = self.normalize_email(email)\n user = self.model(username=username, first_name=first_name, last_name=last_name, email=email,\n is_staff=is_staff, is_active=True, is_superuser=is_superuser, last_login=now,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_new_user(self):\n username = 'pseudo'\n email = 'carole@tests.com'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n 
StatusUser.objects.create(user=user_created)\n\n return user_created", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('Please provide your email address'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, username, first_name, last_name, password):\n\n email = self.normalize_email(email)\n\n user = self.model(\n email=email,\n username=username,\n first_name=first_name,\n last_name=last_name\n )\n\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, **fields):\n email = fields.pop('email')\n password = fields.get('password1')\n if not email:\n raise ValueError(\"Email address is required\")\n email = self.normalize_email(email)\n user = self.model(email=email, **fields)\n user.set_password(password)\n user.save(using=self._db)\n return 
user", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def _create_user(self,email,password,**extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\n\t\ttry:\n\t\t\twith transaction.atomic():\n\t\t\t\tuser = self.model(email=email,**extra_fields)\n\t\t\t\tuser.set_password(password)\n\t\t\t\tuser.save(using=self._db)\n\t\t\t\treturn user\n\t\texcept:\n\t\t\traise", "def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def create_user(user: User):\n coll = data_access.get_user_collection()\n\n if user.name == \"\":\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"User name must not be empty.\")\n\n if coll.find_one(user.dict()) is None:\n coll.insert_one(user.dict())", "def _create_user(self, username, email, password, is_staff, is_superuser, first_name, last_name):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email,username=username,\n first_name=first_name, last_name=last_name,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now)\n user.uuid = generate_uuid()\n user.uniqueid = user.uuid[:4]\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, password):\n return self.User.objects.create_user(username, password=password)", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, name, email, password):\n new_user = User(name=name, email=email, password=password)\n db.session.add(new_user)\n db.session.commit()", "def create_user(self, username, email, password, **other_fields):\n if not username or not email:\n raise ValueError(_('The email and username must be set.'))\n email = self.normalize_email(email)\n\n user = self.model(username=username, email=email, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, username, password, is_staff, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n user = self.model(username=username,\n is_staff=is_staff, is_active=True,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def add_user():\n\n email = request.form[\"email\"]\n password = request.form[\"password\"] \n fname = request.form[\"fname\"]\n lname = request.form[\"lname\"]\n macaddress = request.form[\"macaddress\"]\n role = request.form[\"role\"]\n\n password_hash = generate_password_hash(password, method='sha256', salt_length=8)\n # create a new User object.\n new_user = User(email=email, password=password_hash,\n fname=fname, lname=lname, macaddress=macaddress, role=role)\n\n # add new user to db\n 
db.session.add(new_user)\n # commit the new add.\n db.session.commit()\n\n return userSchema.jsonify(new_user)", "def _create_user(self, email, username, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n # if not username:\n # raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, username, **extra_fields):\n if not email:\n raise ValueError(_('Email is required.'))\n if not username:\n raise ValueError(_('Username is required.'))\n email = self.normalize_email(email)\n username = username\n user = self.model(email=email, username=username, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, username, email, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not email:\n raise ValueError('The given username must be set')\n\n email = self.normalize_email(email)\n user = self.model(username=email, email=email,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n \traise ValueError('Must provide a valid email address')\n\n now = timezone.now()\n user = self.model(\n email=self.normalize_email(email),\n date_joined=now,\n last_login=now,\n **extra_fields\n ) \n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def add_user(self, user):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO users VALUES (null, ?, ?, ?)\",\n (user['username'], user['email'], hash_password(user['password']),))\n self.conn.commit()\n return self.get_user(cursor.lastrowid)", "def _create_user(self, email, username, firstname, lastname, password, 
**other_fields):\n\n if not email:\n raise ValueError(_('You must provide an email address'))\n\n email = self.normalize_email(email)\n user = self.model(email=email, username=username, firstname=firstname, lastname=lastname, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def add_user(username, email, password, is_staff):\n\n user = User.objects.get_or_create(username=username, email=email)[0]\n user.set_password(password)\n user.is_staff = is_staff\n if is_staff:\n user.is_superuser = True\n user.save()\n registered_user = RegisteredUser.objects.get_or_create(user=user)[0]\n registered_user.save()\n return registered_user", "def create_user(email, password, first_name, last_name,\n confirmed=False,\n role=None,\n with_google=False,\n profile_pic_url=None):\n\n # pylint: disable=too-many-arguments\n\n user = User(email=email,\n first_name=first_name,\n last_name=last_name)\n if password:\n user.password = password\n if confirmed:\n user.confirmed = True\n if role:\n user.role = Role.query.filter_by(name=role).first()\n if with_google:\n user.registered_with_google = True\n if profile_pic_url:\n user.profile_pic_url = profile_pic_url\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, username, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n username = self.normalize_email(username)\n user = self.model(username=username,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser, last_login=now,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user" ]
[ "0.82439464", "0.82260287", "0.8174223", "0.81366533", "0.7910809", "0.7828572", "0.78101397", "0.7780627", "0.77802235", "0.7757555", "0.7754437", "0.7749895", "0.7744491", "0.77402997", "0.7723939", "0.77193147", "0.76944625", "0.7688455", "0.7675639", "0.7663784", "0.76571363", "0.76546293", "0.7650244", "0.76500773", "0.7648588", "0.7641405", "0.7639227", "0.7638993", "0.76351404", "0.7632724", "0.76318", "0.76284605", "0.7624064", "0.7616034", "0.76145816", "0.76080525", "0.76079774", "0.7601575", "0.7599782", "0.7595758", "0.7595758", "0.75917304", "0.7590685", "0.7588829", "0.7586656", "0.7584763", "0.75838333", "0.75803494", "0.75752133", "0.7572501", "0.7562906", "0.7562906", "0.7561475", "0.75544274", "0.7554216", "0.75503117", "0.75493574", "0.7549078", "0.7543365", "0.7539929", "0.7539929", "0.7539929", "0.7539929", "0.7539929", "0.7539929", "0.7539929", "0.7539929", "0.75378025", "0.75336903", "0.7529014", "0.7528761", "0.75274014", "0.7525588", "0.75250065", "0.75248003", "0.75212806", "0.752093", "0.7519536", "0.751907", "0.75160086", "0.7512395", "0.75117964", "0.7510592", "0.75076514", "0.7505363", "0.7501286", "0.74969727", "0.7495043", "0.74915063", "0.74915063", "0.74915063", "0.74915063", "0.74915063", "0.74915063", "0.74915063", "0.74887764", "0.74838126", "0.7482574", "0.7482479", "0.7481998" ]
0.7531111
69
create and eventually load model
def create_model(model_class, model_params=None, model_name='model'):
    model_params = {} if model_params is None else model_params
    model = model_class(**model_params)

    if special_parameters.load_model:
        # recover from checkpoint
        _load_model(model, model_name)

    # configure usage on GPU
    if use_gpu():
        model.to(first_device())
        model = torch.nn.DataParallel(model, device_ids=all_devices())

    # print info about devices
    print_info('Device(s)): ' + str(device_description()))

    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model(self):\n pass", "def create_model(self):\n pass", "def load_model(self):\n pass", "def load_model(self) -> Any:", "def create_model(self):\n try:\n self.model = PPO2.load(self.save_path)\n self.model.set_env(self.env)\n print(\"Loading of the latest model successful!\")\n except:\n print(\"Creating new model...\")\n self.model = PPO2(CnnPolicy, self.env, verbose=1)", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load_model(self) -> None:\n\n try:\n model_class = MODEL_TYPES[self.model_type]\n except KeyError:\n raise KeyError(f\"model type: {self.model_type} not supported\")\n\n if (\n os.path.exists(self.resources_path)\n and len(os.listdir(self.resources_path)) > 0\n ):\n model_name_or_path = self.resources_path\n else:\n model_name_or_path = self.model_name\n\n if self.model_type == \"stable_diffusion\":\n self.model = model_class.from_pretrained(\n model_name_or_path,\n use_auth_token=self.auth_token,\n )\n else:\n self.model = model_class.from_pretrained(model_name_or_path)\n\n self.model.to(self.device)", "def load_model(self, model_path: str):", "def load(path_to_model):\n pass", "def load_model(self, path):\n pass", "def create_models( self ):", "def build_model():", "def _create_model(self, key):\n pass", "def create_model(self):\n self.create_model_file()\n self.create_model_unit_test()\n self.add_model_to_list()\n self.readme_reminder()", "def create_model(self):\n self.model = None\n pass", "def load_model(self, filename):\r\n pass", "def load_model(self, tmp_dir):\n pass", "def initialize(self) -> None:\n self.model = load(self.path)", "def get_or_create_model(self) -> Model:\n assert self.model_name\n\n print(\"Check if Model exists.\")\n if self.model_name in self.models:\n print(\"Model does exists.\")\n # if get_model(self.model_name).tags['train_py_hash'] == self.get_file_md5(\n # self.source_directory + \"/\" + self.script):\n model = Model(self, name=self.model_name)\n if not os.path.isdir(\"outputs\"):\n model.download(\"outputs\", exist_ok=True)\n return model\n print(\"Model does not exists.\")\n model = self.train_model()\n\n assert model\n if self.show_output:\n print(model.name, model.version, model.url, sep=\"\\n\")\n return model", "def init_model(self):\n pass", "def load_model(self):\n # Load the model\n print('Loading model:', self.model_path)\n t0 = time.time()\n model = load_model(self.model_path)\n t1 = time.time()\n print('Loaded in:', t1 - t0)\n return model", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def load_model(self):\n Thread(target=self.__load_model).start()", "def MakeModel(self):\n pass", "def load_model():\n with open(MODEL_SAVE_JSON, 'r') as fp:\n json_string = fp.read()\n model = model_from_json(json_string)\n return model", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def load_model():\n logger.info('load_model called')\n return 1", "def __load(self, model_name):\n\n print(\"Loading model.\")\n tstart = datetime.now()\n\n # Temporary directory to extract the zipped information\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Unzip 
the directory that contains the saved model(s)\n with zipfile.ZipFile(model_name + \".zip\", \"r\") as zip_ref:\n zip_ref.extractall(dirpath)\n\n # Load metadata\n metadata = pickle.load(open(dirpath + \"/metadata.pickle\", \"rb\"))\n\n # Re-load metadata\n self.__dict__.update(metadata)\n\n # Load all sub-models\n try:\n self.__mol_to_latent_model = load_model(\n dirpath + \"/mol_to_latent_model.h5\"\n )\n except:\n print(\"'mol_to_latent_model' not found, setting to None.\")\n self.__mol_to_latent_model = None\n\n self.__latent_to_states_model = load_model(\n dirpath + \"/latent_to_states_model.h5\"\n )\n self.__batch_model = load_model(dirpath + \"/batch_model.h5\")\n \n # Build sample_model out of the trained batch_model\n self.__build_sample_model(batch_input_length=1) # Single-output model\n self.__build_sample_model(\n batch_input_length=256 # could also be self.batch_size\n ) # Multi-output model\n\n print(\"Loading finished in %i seconds.\" % ((datetime.now() - tstart).seconds))", "def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)", "def load(\n self,\n modelLoadPath\n ):\n pass", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def __pull_model(self):\n\n model = ArmModeler().get(self.name)\n\n if model:\n logger.debug(\"model creating...\")\n self.alpha = model[\"alpha\"]\n self.a = model[\"a\"]\n self.q = model[\"q\"]\n self.d = model[\"d\"]\n self.dh_params = model[\"dh_params\"]\n self.tf_matrices_list = model[\"transform_matrices\"]\n self.jacobian_matrix = model[\"jacobian_matrix\"]\n\n else:\n ArmModeler().create(self.name)\n self.__pull_model()", "def createModel(self):\n model_psp = self.getModelPsp()\n\n if not model_psp:\n log_func.warning(u'Not define model in <%s : %s>' % (self.getName(), self.getType()))\n return None\n\n model_name = self.newPassport().setAsStr(model_psp).name\n\n scheme = self.getScheme()\n if scheme:\n return scheme.getModel(model_name)\n else:\n log_func.warning(u'Error create data scheme object')\n return None", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def initialize_model(self):\n pass", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def load_model(self):\n if os.path.exists(self.model_filename):\n self.model.load_weights(self.model_filename)", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - 
start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)", "def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def _build_model(self):\n raise NotImplementedError()", "def load_model(self, fl_ctx: FLContext) -> ModelLearnable:\n if os.path.exists(self.save_path):\n self.logger.info(\"Loading server model\")\n model = load(self.save_path)\n else:\n self.logger.info(f\"Initialization, sending global settings: {self.initial_params}\")\n model = self.initial_params\n model_learnable = make_model_learnable(weights=model, meta_props=dict())\n\n return model_learnable", "def load_model(app: FastAPI) -> None:\n\n logging.info(\"Starting up the application\")\n model_path = DATA_MODEL_PATH\n\n if model_path.exists():\n model = FraudDetection(model_path)\n app.state.model = model\n logging.info(f\"Loaded model {model_path}\")\n else:\n app.state.model = FraudDetection()\n logging.warning(f\"No existing model found in {model_path}\")", "def build_model(self):\n pass", "def build_model(self):\n pass", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model", "def load_model(self, model_name, model_url):\n\n fname = join(self.root, model_name)\n if not isfile(fname):\n if self.verbose:\n print(\"Could not find \" + fname + \".. attempt download\")\n with urllib.request.urlopen(model_url) as res, open(fname, 'wb') as f:\n shutil.copyfileobj(res, f)\n if self.verbose:\n print(\"Download complete.. model: \" + fname)\n elif self.verbose:\n print(\"Found model \" + fname + \"! 
:)\")\n\n model = load_model(fname)\n self.model = model", "def load_model_(model_name='Architecture_2/Arch_2_2', classification_type='transition'):\n # load json and create model\n json_file = open('../results/models/{}/{}.json'.format(classification_type, model_name), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n model = model_from_json(loaded_model_json)\n # load weights into new model\n model.load_weights(\"../results/models/{}/{}.h5\".format(classification_type, model_name))\n print(\"Loaded model from disk\")\n return model", "def load_model(self):\r\n try:\r\n self.model = CRNN_STN(self.crnn_cfg())\r\n self.model.load_weights(config.CRNN_Model_Path)\r\n except:\r\n print('Error in method {0} in module {1}'.format('load_model', 'crnn_bridge.py'))", "def model(self, new_model):\n if self.locations.empty:\n raise RuntimeError(\"Cannot create a model until locations exist\")\n writer = ModelWriter(self, self.dismod_file)\n new_model.write(writer)\n writer.close()", "def maybe_load_model(self):\n if self.model:\n return\n\n assert self.model_path, \"No model path\"\n\n _LOGGER.debug(\n \"Loading model from %s (beam width=%s)\", self.model_path, self.beam_width\n )\n self.model = deepspeech.Model(str(self.model_path))\n self.model.setBeamWidth(self.beam_width)\n\n if (\n self.scorer_path\n and self.scorer_path.is_file()\n ):\n _LOGGER.debug(\n \"Enabling language model (scorer=%s, lm_alpha=%s, lm_beta=%s)\",\n self.scorer_path,\n self.lm_alpha,\n self.lm_beta,\n )\n self.model.setScorerAlphaBeta(\n self.lm_alpha,\n self.lm_beta\n )\n self.model.enableExternalScorer(\n str(self.scorer_path)\n )", "def basic_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n model_cls = getattr(mod, model_args.architectures,\n AutoModelForSequenceClassification)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def load_model(self, filename):\n filename = path.join(self.root_path, f'models/{filename}.pkl')\n self.model = pickle.load(open(filename, \"rb\"))\n print('Successfully loaded model from '+filename)", "def _init_model(self):\n try:\n model_config = self._conf.get(PredictConstance.BASE_CONFIG,\n PredictConstance.MODEL_CONFIG)\n conf = configparser.ConfigParser()\n conf.read(model_config)\n self._model = model_factory.ModelFactory.create_model(config=conf)\n return True\n except Exception as err:\n self.managerlogger.logger.error(\"init model error: %s\" % err)\n self.errorlogger.logger.error(\"init model error: \\n %s\" % traceback.format_exc())\n return False", "def recent_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n config.dense_type = model_args.dense_type\n config.act_type = model_args.act_type\n config.num_labels_per_head = [\n len(label_id) for label_id in task_infos.head_id_to_label_id\n ]\n config.head2label = task_infos.head_id_to_label_id\n model_cls = getattr(mod, model_args.architectures,\n RobertaForKlueRecent)\n model = 
model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )", "def load_model(self, folder_name):\n raise NotImplementedError()", "def _create(self, model_obj: Any):\n conn = self.provider.get_connection()\n\n try:\n model_obj.save(\n refresh=True,\n index=self.model_cls._index._name,\n using=conn,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj", "def try_create_model_and_load_from_checkpoint(self) -> bool:\n self.create_model()\n if self.checkpoint_path:\n # Load the stored model. If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_model()\n return True", "def create_new_model(self, model_params):\n # Create a new model with param\n try:\n for param_name, param_val in model_params.items():\n self.cmodel.__dict__[param_name] = param_val\n except sklearn.exceptions.FitFailedWarning:\n pass\n\n # Generate new Id for model\n model_names = os.listdir('src')\n new_id = 1\n while f'model_{new_id}.pkl' in model_names:\n new_id += 1\n\n fp = self.filepath.split('/')[:-1]\n fp = '/'.join(fp)\n self.filepath = fp + '/model_' + str(new_id) + '.pkl'\n print(self.filepath)\n self._save_model()\n\n return new_id", "def load_model():\n global obj\n obj = NutritionTableDetector()\n print(\"Weights Loaded!\")", "def initialize_model(self):\n model = self.model_class()\n return model", "def load_model(self, path_model: Optional[PathLike]) -> None:\n raise NotImplementedError", "def load_model(self):\n if self.save_path is not None:\n if isfile(self.save_path):\n self.model.load_state_dict(load(self.save_path))\n else:\n raise ValueError(\"Cannot find model save file: \" + self.save_path)", "def _load_from(cls, model_state: dict) -> 'AbstractModel':\n raise NotImplementedError", "def load(self, path):\n load_model(path, self)", "def load_model(name):\n\tmodel = joblib.load(\"data/{}/{}.model\".format(name, name))\n\t# Setting n_jobs to 1 in case it was set to a higher number while training the model seems to makes predictions of single samples much faster.\n\tmodel.n_jobs = 1\n\treturn model", "def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance", "def 
load_model(model, model_index, device=\"cpu\"):\n with open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n model.to(device)\n return model", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def init_model(model_filename, doGPU):\n # set model attributes list\n ##print(\"Model-dataset =\", model_ds_name)\n ##if model_ds_name == 'modelRAP':\n ## model_labels = loader_rapdataset_yiqiang.ATTRIBUTES\n ##elif model_ds_name == 'modelPETA':\n ## model_labels = loader_peta_dataset.ATTRIBUTES\n ##elif model_ds_name == 'modelRAPPETA':\n ## model_labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n ##else:\n ## print(\"ERROR: unknown model-dataset.\")\n ## sys.exit()\n model_labels = loader_rap_plus_peta_dataset.ATTRIBUTES\n assert (len(model_labels) == 49)\n\n # create model\n person.NO_ATTRIBUTES = len(model_labels) #TODO-elo: ugly, attr. nbr should be a parameter of person.Net.__init__()\n net = person.Net()\n if doGPU:\n net = person.Net().cuda()\n\n # load model\n print('loading model \"' + model_filename + '\"')\n person.load_model(net, model_filename)\n\n return net, model_labels", "def _get_model():\n freezing = os.environ.get('NAUCSE_FREEZE', not app.config['DEBUG'])\n initialize = True\n\n try:\n g.model = app.config['NAUCSE_MODEL']\n except KeyError:\n g.model = init_model()\n app.config['NAUCSE_MODEL'] = g.model\n else:\n if freezing:\n # Model already initialized; don't look for changes\n return\n\n # (Re-)initialize model\n\n g.model.load_licenses(Path(app.root_path).parent / 'licenses')\n g.model.load_local_courses(Path(app.root_path).parent)\n\n if freezing:\n g.model.freeze()", "def load_model(cls) -> Classifier:\n if cls.model is None:\n cls.model = Classifier.load(model_path)\n return cls.model", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' 
<source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def load_model():\n global model_tok, model_mlm, model, model_cls\n if model is None:\n model_name_or_path = os.getenv('TRANSFORMER_MODEL', default='distilbert-base-multilingual-cased')\n # 'bert-base-multilingual-cased'\n model_tok = AutoTokenizer.from_pretrained(model_name_or_path)\n model_mlm = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n model_mlm.eval()\n model = model_mlm.base_model\n\n if isinstance(model_mlm, BertPreTrainedModel):\n model_cls = model_mlm.cls\n elif isinstance(model_mlm, DistilBertPreTrainedModel):\n model_cls = nn.Sequential(\n model_mlm.vocab_transform,\n nn.GELU(),\n model_mlm.vocab_layer_norm,\n model_mlm.vocab_projector\n )\n else:\n raise ValueError(f'{model_name_or_path} is not supported yet. 
try one of '\n f'{\", \".join(list(AvailableModels.__members__.keys()))}')\n model.to(device)\n model_mlm.to(device)\n # model_tok.to(device)\n model_cls.to(device)", "def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)", "def load_model(self, file=None):\n return None", "def build_model(self):\n raise NotImplementedError", "def load_model(model_name):\r\n model = joblib.load(model_name)\r\n return model", "def load_model(self, **params):\n \t# file_name = params['name']\n # return pickle.load(gzip.open(file_name, 'rb'))", "def _load(self, req, id, body):\n context = req.environ['meteos.context']\n\n LOG.debug(\"Load model with request: %s\", id)\n\n try:\n model = self.engine_api.get_model(context, id)\n utils.is_valid_status(model.__class__.__name__,\n model.status,\n constants.STATUS_AVAILABLE)\n experiment = self.engine_api.get_experiment(\n context, model.experiment_id)\n template = self.engine_api.get_template(\n context, experiment.template_id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n\n self.engine_api.load_model(context,\n id,\n model.dataset_format,\n model.model_type,\n template.job_template_id,\n model.experiment_id,\n model.cluster_id)\n\n return {'model': {'id': id}}", "def _prepare_model(model):\n\n # Ensure there is at least 1 load combination to solve if the user didn't define any\n if model.LoadCombos == {}:\n # Create and add a default load combination to the dictionary of load combinations\n model.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})\n \n # Generate all meshes\n for mesh in model.Meshes.values():\n if mesh.is_generated == False:\n mesh.generate()\n\n # Activate all springs and members for all load combinations\n for spring in model.Springs.values():\n for combo_name in model.LoadCombos.keys():\n spring.active[combo_name] = True\n \n # Activate all physical members for all load combinations\n for phys_member in model.Members.values():\n for combo_name in model.LoadCombos.keys():\n phys_member.active[combo_name] = True\n \n # Assign an internal ID to all nodes and elements in the model. This number is different from the name used by the user to identify nodes and elements.\n _renumber(model)", "def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type", "async def load_model(\n self,\n model_name: str,\n headers: dict[str, t.Any] = ...,\n config: str = ...,\n files: dict[str, str] = ...,\n ) -> None:", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def custom_model():\n\t# initialize the model\n\t# load weights from path\n\t# returns model\n\tmodel = mlp.get_training_model()\n\tmodel.load_state_dict(torch.load(\"model_wt.pth\"))\n\treturn model", "def load_model(self):\n self._logger.debug(f\"Loading Spacy Data Model : {self._model}... 
Could take time.\")\n self._nlp = spacy.load(self._model)\n self._logger.debug(\"Successfully loaded Spacy Data !\")\n\n # === Load entities ===\n if PIPE_ENTITY not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_ENTITY, last=True)\n\n entity_pipe = self._nlp.get_pipe(PIPE_ENTITY)\n for entity in self._entities:\n entity_pipe.add_label(entity)\n\n # === Load categories ===\n if PIPE_INTENT not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_INTENT, last=True)\n\n intent_pipe = self._nlp.get_pipe(PIPE_INTENT)\n for intent in self._intents:\n intent_pipe.add_label(intent)", "def build(model_name):\n return pretrain.factory.create(model_name)", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def load_model():\n\n # find location of model\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'original_mlr.joblib' \n the_file = os.path.join(file_path, file_name)\n\n # load model\n\n model = load(the_file)\n\n return model", "def load_model(self, filename):\n event = teca_time_py_event('teca_deeplab_ar_detect::load_model')\n\n # this creates OpenMP thread pools and imports torch\n # it must be called *before* we import torch\n self.initialize()\n\n # import our torch codes only now that torch has been initialized\n global teca_deeplab_ar_detect_internals\n from teca_deeplab_ar_detect_internals \\\n import teca_deeplab_ar_detect_internals\n\n # create an instance of the model\n model = teca_deeplab_ar_detect_internals.DeepLabv3_plus(\n n_classes=1, _print=False)\n\n # load model weights from state on disk\n super().load_model(filename, model)", "def __create_model(self, classes):\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained_base=True)\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained=True)\r\n # self._model.reset_class(classes, reuse_weights=[cname for cname in classes if cname in self._model.classes])\r\n if self._model is None or classes != self.classes:\r\n model_name = 'ssd_{}_{}_custom'.format(self.img_size, self.backbone)\r\n self._model = model_zoo.get_model(model_name, classes=classes, pretrained=False, pretrained_base=True,\r\n root=self.temp_path)\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"always\")\r\n self._model.initialize()\r\n self._model.collect_params().reset_ctx(self.ctx)\r\n _, _, _ = self._model(mx.nd.zeros((1, 3, self.img_size, self.img_size), self.ctx))\r\n\r\n self._model.reset_class(classes)\r\n self.classes = classes", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def load_model(self):\n self.__model = tf.keras.models.load_model(\n os.path.join(self.model_path, \"model.h5\")\n )\n print(\"[INFO] Model loaded!\")\n\n tok_pth = os.path.join(self.model_path, \"tokenizer.json\")\n with open(tok_pth, \"r\") as f:\n 
self.__tokenizer = tf.keras\\\n .preprocessing\\\n .text\\\n .tokenizer_from_json(f.read())\n print(\"[INFO] Tokenizer loaded!\")\n\n meta_pth = os.path.join(self.model_path, \"meta.json\")\n with open(meta_pth, \"r\") as f:\n meta = json.load(f)\n self.__title_len = meta[\"title_pad_length\"]\n self.__body_len = meta[\"body_pad_length\"]\n\n self.load_explainer()\n print(\"[INFO] Explainer loaded!\")" ]
[ "0.7996416", "0.7996416", "0.7820359", "0.7744954", "0.76674736", "0.75198305", "0.7464868", "0.7434571", "0.7401831", "0.7381503", "0.73702586", "0.7361736", "0.7338781", "0.73340213", "0.73315054", "0.7304617", "0.7300003", "0.72266036", "0.71483904", "0.70854926", "0.70715237", "0.7039578", "0.70354384", "0.70324033", "0.7015041", "0.6991126", "0.6983229", "0.69426817", "0.69392073", "0.69340634", "0.69132435", "0.690882", "0.6907224", "0.6879529", "0.6870022", "0.68456435", "0.6842941", "0.68420756", "0.68241537", "0.68140763", "0.6809433", "0.6792647", "0.67846245", "0.67579687", "0.6753191", "0.6747825", "0.6747389", "0.6747389", "0.6730244", "0.6730244", "0.6721346", "0.67036974", "0.66951793", "0.6694625", "0.66891104", "0.6688541", "0.66884345", "0.6686949", "0.66855705", "0.66737735", "0.66722953", "0.66656756", "0.666388", "0.6655896", "0.66545445", "0.6644788", "0.66431165", "0.6631341", "0.66308224", "0.66297495", "0.6620472", "0.66198516", "0.66183907", "0.6617122", "0.6611434", "0.66102946", "0.6609511", "0.66059864", "0.660423", "0.65989405", "0.6595753", "0.6592732", "0.6590964", "0.65900934", "0.6585616", "0.658457", "0.6583615", "0.65827626", "0.6582303", "0.65742004", "0.6560934", "0.65570354", "0.6555845", "0.65484697", "0.6542366", "0.6541732", "0.6538267", "0.6537262", "0.65252817", "0.6511684", "0.6505672" ]
0.0
-1
create and eventually load optimizer
def create_optimizer(parameters, optimizer_class, optim_params, model_name='model'):
    opt = optimizer_class(parameters, **optim_params)
    if special_parameters.load_model:
        _load_optimizer(opt, model_name)
    return opt
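A minimal usage sketch of the factory above, assuming a PyTorch setting; `special_parameters` and `_load_optimizer` are stand-ins for the project-specific config flag and checkpoint-restore helper it references, and the checkpoint filename is hypothetical.

import torch
import torch.nn as nn


class _SpecialParameters:
    """Stand-in for the project-wide config object referenced by the factory."""
    load_model = False  # set True to restore optimizer state from a saved checkpoint


special_parameters = _SpecialParameters()


def _load_optimizer(opt, model_name):
    """Hypothetical helper: restore optimizer state saved under `model_name`."""
    state = torch.load(f"{model_name}_optimizer.pt")
    opt.load_state_dict(state)


def create_optimizer(parameters, optimizer_class, optim_params, model_name='model'):
    opt = optimizer_class(parameters, **optim_params)
    if special_parameters.load_model:
        _load_optimizer(opt, model_name)
    return opt


# Build an Adam optimizer for a toy model; optimizer_class and optim_params are
# passed in, so swapping in SGD or different hyperparameters needs no code change.
model = nn.Linear(10, 2)
optimizer = create_optimizer(model.parameters(), torch.optim.Adam,
                             {'lr': 1e-3, 'weight_decay': 1e-5})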
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_optimizer(self, context, optimizer, host):\n pass", "def _inst_optimizer(self):\n optimizer = Optimizers(self.m_cfg['configs']['lr_politics']['optimizer']).value\n lr_schedule = self.m_cfg['configs']['lr_politics']['lr']\n opt = optimizer(learning_rate=lr_schedule)\n return opt", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def create_optimizer(self) -> None:\n # Make sure model is created before we create optimizer\n if self._model is None:\n raise ValueError(\"Model checkpoint must be created before optimizer checkpoint can be loaded.\")\n\n # Select optimizer type\n if self.config.optimizer_type in [OptimizerType.Adam, OptimizerType.AMSGrad]:\n self._optimizer = torch.optim.Adam(self._model.parameters(), self.config.l_rate,\n self.config.adam_betas, self.config.opt_eps, self.config.weight_decay,\n amsgrad=self.config.optimizer_type == OptimizerType.AMSGrad)\n elif self.config.optimizer_type == OptimizerType.SGD:\n self._optimizer = torch.optim.SGD(self._model.parameters(), self.config.l_rate, self.config.momentum,\n weight_decay=self.config.weight_decay)\n elif self.config.optimizer_type == OptimizerType.RMSprop:\n self._optimizer = RMSprop(self._model.parameters(), self.config.l_rate, self.config.rms_alpha,\n self.config.opt_eps,\n self.config.weight_decay, self.config.momentum)\n else:\n raise NotImplementedError(f\"Optimizer type {self.config.optimizer_type.value} is not implemented\")", "def importOptimizer():\n module_path = os.path.join(path, \"optimization\")\n module_path = os.path.join(module_path, \"optimizer.py\")\n optimizer_class = importClass(\"Optimizer\", \"optimizer\", module_path)\n return optimizer_class", "def initialize_optimization(self):\n\n if self.FLAGS.optimizer == \"Adam\" :\n self.solver = tf.train.AdamOptimizer(\n learning_rate = self.learning_rate,\n beta1 = self.FLAGS.beta1,\n beta2 = self.FLAGS.beta2)\n else:\n print(\"ERROR: Cannot handle optimizer type {}!!!\".format(self.FLAGS.optimizer))\n raise RuntimeError\n \n # batch normalization in tensorflow requires this extra dependency\n # this is required to update the moving mean and moving variance variables\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n self.update = self.solver.minimize(self.loss, global_step=self.global_step)", "def update_optimizer(self, context, optimizer, host):\n pass", "def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):\n self.optimize_ops = []\n for loss in self.losses['train']: # TODO Create apropoiate external training scheme\n optimize_op = optimizer_to_use(\n learning_rate=self.learning_rate\n )\n if tpu_support:\n optimize_op = tpu.CrossShardOptimizer(optimize_op)\n optimize_op = optimize_op.minimize(\n loss=loss,\n global_step=tf.train.get_global_step()\n )\n self.optimize_ops.append(optimize_op)\n logging.info('Optimizers built')", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = 
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def _load_optimizer(self):\n # loss function\n with tf.variable_scope(\"forward\"):\n self.loss_fwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_fwd,\n self.labels, self.weights, self.vocab_size)\n\n # optimizer\n # self.optimizer_fwd = tf.train.MomentumOptimizer(self.learning_rate,\n # self.momentum)\n self.optimizer_fwd = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.train_op_fwd = self.optimizer_fwd.minimize(self.loss_fwd)\n\n with tf.variable_scope(\"backward\"):\n self.loss_bwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_bwd,\n self.labels, self.weights, self.vocab_size)\n\n # optimizer\n # self.optimizer_bwd = tf.train.MomentumOptimizer(self.learning_rate,\n # self.momentum)\n self.optimizer_bwd = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.train_op_bwd = self.optimizer_bwd.minimize(self.loss_bwd)", "def create_optimizer(self, run_configuration, rng=None):\n @functools.partial(jax.jit, static_argnums=(1, 2))\n def create_model(rng, example, model_cls):\n with flax.deprecated.nn.attention.Cache().mutate() as cache_def:\n _, initial_params = model_cls.init(\n rng,\n example,\n cache=cache_def)\n model = flax.deprecated.nn.Model(model_cls, initial_params)\n return model, cache_def\n\n config = self.config\n dataset = run_configuration.dataset_info.dataset\n\n rng = rng if rng is not None else jax.random.PRNGKey(0)\n learning_rate = config.opt.learning_rate\n example = self.as_example(next(iter(dataset)))\n model_cls = run_configuration.model\n model, unused_cache_def = create_model(rng, example, model_cls)\n return optimizer_utils.create_optimizer(model, learning_rate)", "def add_optimizer(self):\n \n with tf.variable_scope(\"optimizer\"):\n\n # Define optimizer and minimize loss\n if self.OPTIM == \"RMSProp\":\n self.optimizer = tf.train.RMSPropOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"GD\":\n self.optimizer = tf.train.GradientDescentOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"Adam\":\n self.optimizer = tf.train.AdamOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n\n # Merge all summaries for tensorboard\n #self.tbsummaries = tf.summary.merge_all()", "def try_create_optimizer_and_load_from_checkpoint(self) -> bool:\n self.create_optimizer()\n if self.checkpoint_path:\n return self.try_load_checkpoint_for_optimizer()\n return True", "def compile(self, gen_optimizer, disc_optimizer):\n 
self.gen_optimizer = gen_optimizer\n self.disc_optimizer = disc_optimizer", "def register_optimizer(key, module):\n register(key, module, optimizer_dict)", "def __init__(self, ea_optimizer, is_chief, task_index):\n self._ea_optimizer = ea_optimizer\n self._is_chief = is_chief\n self._task_index = task_index", "def start(config: Config):\n return Optimizer(config).start()", "def load_optimizers(self, epoch):\n for i, optimizer in enumerate(self.optimizers):\n load_filename = '{0}_optimizer_{1}.pth'.format(epoch, i)\n load_path = os.path.join(self.save_dir, load_filename)\n print('loading the optimizer from {0}'.format(load_path))\n state_dict = torch.load(load_path)\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n optimizer.load_state_dict(state_dict)", "def create_model_optimizer(net,alpha):\n optimizer = chainer.optimizers.Adam(alpha=alpha)\n optimizer.setup(net)\n return optimizer", "def _get_optimizer(self):\n raise NotImplementedError", "def create_optimizer(net, optimizer_state_dict, learning_rate, device='cuda'):\n # define optimizer\n optimizer = optim.Adam([{\n 'params': net.net.parameters(),\n 'initial_lr': learning_rate\n }])\n # load optimizer checkpoint if available\n if optimizer_state_dict is not None:\n target_device = 'cpu' if device == 'cpu' else 'cuda'\n # load the optimizer weights\n optimizer.load_state_dict(optimizer_state_dict)\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = getattr(v, target_device)()\n return optimizer", "def optimizer_creator(model, config):\n return torch.optim.SGD(model.parameters(), lr=config.get(\"lr\", 1e-4))", "def add_optimizers_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"optimizers\") as scope:\n self.grads_and_vars = list() # [sch_idx][weight_idx]\n self.apply_grads = list() # [sch_idx][weight_idx]\n self.learning_rates = list() # [sch_idx][weight_idx]\n if self.params.optimizer == \"lbfgsb\":\n self.minimizer = None\n #self.minimizer = tfp.optimizer.lbfgs_minimize(\n # value_and_gradients_function=self.loss_value_and_grad,#self.total_loss,\n # initial_position=self.w_init,#self.trainable_variables,\n # max_iterations=self.params.maxiter)\n #self.minimizer = tf.contrib.opt.ScipyOptimizerInterface(self.total_loss,\n # options={\"maxiter\":self.params.maxiter}) # Default method is L-BFGSB\n for schedule_idx, sch in enumerate(self.params.schedule):\n sch_grads_and_vars = list() # [weight_idx]\n sch_apply_grads = list() # [weight_idx]\n sch_lrs = list() # [weight_idx]\n #Construct weight ops\n weight_ops = [self.trainable_variables[weight] for weight in sch[\"weights\"]]\n for w_idx, weight in enumerate(sch[\"weights\"]):\n weight_name = weight.split(\"/\")[-1].split(\":\")[0]\n learning_rates = tf.compat.v1.train.exponential_decay(\n learning_rate=sch[\"weight_lr\"][w_idx],\n global_step=self.global_step,\n decay_steps=sch[\"decay_steps\"][w_idx],\n decay_rate=sch[\"decay_rate\"][w_idx],\n staircase=sch[\"staircase\"][w_idx],\n name=\"annealing_schedule_\"+weight_name)\n sch_lrs.append(learning_rates)\n if self.params.optimizer == \"sgd\":\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rates,\n name=\"grad_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adam\":\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rates, beta1=0.9, beta2=0.99,\n epsilon=1e-07, name=\"adam_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adadelta\":\n 
optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rates, epsilon=1e-07,\n name=\"adadelta_optimizer_\"+weight_name)\n elif self.params.optimizer == \"lbfgsb\":\n optimizer = None\n else:\n assert False, (\"Optimizer \"+self.params.optimizer+\" is not supported.\")\n weight_op = self.trainable_variables[weight]\n sch_grads_and_vars.append(self.compute_weight_gradients(optimizer, weight_op))\n gstep = self.global_step if w_idx == 0 else None # Only increment once\n if self.params.optimizer == \"lbfgsb\": # BFGS doesn't actually need the update op\n if w_idx == 0:\n sch_apply_grads.append(tf.compat.v1.assign_add(self.global_step, 1))\n else:\n sch_apply_grads.append(None)\n else:\n sch_apply_grads.append(optimizer.apply_gradients(sch_grads_and_vars[w_idx],\n global_step=gstep))\n self.learning_rates.append(sch_lrs)\n self.grads_and_vars.append(sch_grads_and_vars)\n self.apply_grads.append(sch_apply_grads)\n self.optimizers_added = True", "def make_optimizer(self):\r\n # parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()]\r\n if self.flags.optim == 'Adam':\r\n op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'RMSprop':\r\n op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'SGD':\r\n op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n else:\r\n raise Exception(\"Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben\")\r\n return op", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def optimizer_setup(model, params):\n if params.optimizer == 'adam':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_adam(model, params)\n else:\n optimizer = optimizer_handler.plain_adam(model, params)\n elif params.optimizer == 'sgd':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_sgd(model, params)\n else:\n optimizer = optimizer_handler.plain_sgd(model, params)\n\n if params.zero_bn_bias_decay:\n optimizer = zero_wdcay_bn_bias(optimizer)\n\n return optimizer", "def setup_optimizers(self, *args, **kwargs):\n\n # self.optimizers.append(...)\n # self.loss.append(...)\n pass", "def _cook_optimizer(self, \n lr = 0.01, \n optimizer = 'sgd',\n l1_coeff = 0.00001,\n l2_coeff = 0.00001):\n with tf.variable_scope (self.name + '_train') as scope:\n apply_regularizer (name = self.name, var_list = tf.get_collection(\n self.name + '_regularizer_worthy_params'), \n l1_coeff = l1_coeff,\n l2_coeff = l2_coeff )\n self.obj = tf.add_n(tf.get_collection( self.name + '_objectives'), name='objective')\n tf.summary.scalar('total_objective', self.obj)\n\n # Change (supply as arguments) parameters here directly in the code.\n if optimizer == 'sgd': \n self.back_prop = apply_gradient_descent(var_list = tf.get_collection(\n self.name + '_trainable_params'),\n obj = self.obj, learning_rate = lr )\n elif optimizer == 'adagrad': \n self.back_prop = apply_adagrad(var_list = tf.get_collection(\n self.name + '_trainable_params'),\n obj = self.obj, learning_rate = lr ) \n elif optimizer == 'rmsprop':\n self.back_prop = apply_rmsprop(var_list = tf.get_collection(\n self.name + '_trainable_params') ,\n obj = self.obj, learning_rate = lr)\n elif optimizer == 'adam':\n self.back_prop = apply_adam (var_list = tf.get_collection(\n self.name + 
'_trainable_params') ,\n obj = self.obj, learning_rate = lr )\n else:\n raise Error('Invalid entry to optimizer')", "def optimizer(self):\n return 'sgd'", "def init_optimizer(self, state_dict=None, use_gpu=True):\n if self.args.fix_embeddings:\n self.network.embedder.src_word_embeddings.fix_word_lut()\n self.network.embedder.tgt_word_embeddings.fix_word_lut()\n\n if self.args.optimizer == 'sgd':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.SGD(parameters,\n self.args.learning_rate,\n momentum=self.args.momentum,\n weight_decay=self.args.weight_decay)\n\n elif self.args.optimizer == 'adam':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.Adam(parameters,\n self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n elif self.args.optimizer == 'adamW':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.AdamW(parameters,\n self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n\n else:\n raise RuntimeError('Unsupported optimizer: %s' % self.args.optimizer)\n\n if state_dict is not None:\n self.optimizer.load_state_dict(state_dict)\n # FIXME: temp soln - https://github.com/pytorch/pytorch/issues/2830\n if use_gpu:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.cuda()", "def create_optimizer(hparams):\n\n if hparams.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=hparams.learning_rate, momentum=hparams.momentum)\n elif hparams.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate=hparams.learning_rate)\n elif hparams.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n hparams.learning_rate)\n elif hparams.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n hparams.learning_rate)\n elif hparams.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n hparams.learning_rate, momentum=hparams.momentum)\n\n return optimizer", "def optimizer_factory(config, model):\n params = model.parameters()\n\n optimizer = config[\"loss\"].get(\"optimizer\", \"Adam\")\n lr = config[\"loss\"].get(\"lr\", 1e-3)\n momentum = config[\"loss\"].get(\"momentum\", 0.9)\n\n if optimizer == \"SGD\":\n return optim.SGD(params, lr=lr, momentum=momentum)\n elif optimizer == \"Adam\":\n return optim.Adam(params, lr=lr)\n else:\n raise NotImplementedError()", "def _create_train_op(self):\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n self.train_op = self.optimizer.minimize(self.loss)", "def _create_train_op(self):\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n else:\n raise NotImplementedError('Unsupported 
optimizer: {}'.format(self.optim_type))\n self.train_op = self.optimizer.minimize(self.loss)", "def get_optimizer_cls(name: str):\n return optimizer_registry[name][1]", "def compile_optimizer(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.cfg.learning_rate)\n\n return optimizer", "def setup(self, opt):\n if self.isTrain:\n self.schedulers = [base_function.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n if not self.isTrain or opt.continue_train:\n self.load_networks(opt.which_iter)", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer'+str(self._optimizer_counter), optimizer)\n self._optimizer_counter += 1\n # optimizer indexing : optimizer 0 is the optimizer for layer 0", "def propose_optimize():\n pass", "def init_optimizer(self, kvstore='local', optimizer='sgd',\n optimizer_params=(('learning_rate', 0.01),), force_init=False):\n assert self.binded and self.params_initialized\n\n if self.optimizer_initialized and not force_init:\n self.logger.warning('optimizer already initialized, ignoring...')\n return\n\n if self._params_dirty:\n self._sync_params_from_devices()\n\n (kvstore, update_on_kvstore) = \\\n mx.model._create_kvstore(kvstore, len(self._context), self._arg_params)\n\n batch_size = self._exec_group.batch_size\n if kvstore and 'dist' in kvstore.type and '_sync' in kvstore.type:\n batch_size *= kvstore.num_workers\n rescale_grad = 1.0 / batch_size\n\n\n idx2name = {}\n if update_on_kvstore:\n idx2name.update(enumerate(self._exec_group.param_names))\n else:\n for k in range(len(self._context)):\n idx2name.update({i*len(self._context)+k: n\n for i, n in enumerate(self._exec_group.param_names)})\n name2idx = {}\n for k, v in idx2name.items():\n if v not in name2idx:\n name2idx[v] = []\n name2idx[v].append(k)\n\n if isinstance(optimizer, str):\n optimizer_params = dict(optimizer_params)\n if 'rescale_grad' not in optimizer_params:\n optimizer_params['rescale_grad'] = rescale_grad\n optimizer = mx.optimizer.create(optimizer,\n sym=self.symbol, param_idx2name=idx2name,\n **optimizer_params)\n else:\n assert isinstance(optimizer, mx.optimizer.Optimizer)\n if optimizer.rescale_grad != rescale_grad:\n #pylint: disable=no-member\n warnings.warn(\n \"Optimizer created manually outside Module but rescale_grad \" +\n \"is not normalized to 1.0/batch_size/num_workers (%s vs. %s). 
\"%(\n optimizer.rescale_grad, rescale_grad) +\n \"Is this intended?\", stacklevel=2)\n if len(optimizer.idx2name):\n warnings.warn(\"The idx2name of the optimizer is overwrote by ModuleEXT\")\n # overwrite optimizer.idx2name\n optimizer.idx2name = idx2name.copy()\n\n self._param_idx2name = idx2name \n self._param_name2idx = name2idx\n self._optimizer = optimizer\n self._kvstore = kvstore\n self._update_on_kvstore = update_on_kvstore\n self._updater = None\n\n if kvstore:\n if self._compression_params:\n kvstore.set_gradient_compression(self._compression_params)\n # copy initialized local parameters to kvstore\n _initialize_kvstore(kvstore=kvstore,\n param_arrays=self._exec_group.param_arrays,\n arg_params=self._arg_params,\n param_names=self._param_names,\n update_on_kvstore=update_on_kvstore)\n if update_on_kvstore:\n kvstore.set_optimizer(self._optimizer)\n else:\n self._updater = mx.optimizer.get_updater(optimizer)\n\n self.optimizer_initialized = True\n\n if self._preload_opt_states is not None:\n self.load_optimizer_states(self._preload_opt_states)\n self._preload_opt_states = None", "def register_optimizer(name, opt):\r\n if name in predefined_optimizers:\r\n raise ValueError('Optimizer name already taken: %s' % name)\r\n predefined_optimizers[name] = opt", "def _initialize_optimizer(self, optimizer: Union[str, GSTOptimizer]):\n if isinstance(optimizer, str):\n if optimizer == 'linear_inverse':\n self._optimizer = LinearInversionOptimizer()\n elif optimizer == 'mle':\n # self._optimizer = MLEOptimizer()\n raise ArgumentError(\"in GateSetTomography(): The maximum likelihood estimation optimizer \"\n \"will be supported in the next version, \"\n \"Please use the linear inversion optimizer!\")\n else:\n raise ArgumentError(\"in GateSetTomography(): undefined optimizer name {}. 
\"\n \"Supported optimizer names are: 'linear_inverse' and 'mle'.\".format(optimizer))\n elif isinstance(optimizer, GSTOptimizer):\n self._optimizer = optimizer\n else:\n raise ArgumentError(\"in GateSetTomography: undefined optimizer type!\")\n pass", "def on_stage_start(self, state: _State):\n optimizer = state.get_attr(\n key=\"optimizer\", inner_key=self.optimizer_key\n )\n assert optimizer is not None\n self._optimizer = optimizer", "def build_optimizer(model: nn.Module, args: Namespace) -> Optimizer:\n params = [{'params': model.parameters(), 'lr': args.init_lr, 'weight_decay': 0}]\n\n return Adam(params)", "def instantiate_optimizer(params, scope):\n # Create optimizer map.\n optimizers = {\n \"Adadelta\": tf.keras.optimizers.Adadelta,\n \"Adagrad\": tf.keras.optimizers.Adagrad,\n \"Adam\": tf.keras.optimizers.Adam,\n \"Adamax\": tf.keras.optimizers.Adamax,\n \"Ftrl\": tf.keras.optimizers.Ftrl,\n \"Nadam\": tf.keras.optimizers.Nadam,\n \"RMSprop\": tf.keras.optimizers.RMSprop,\n \"SGD\": tf.keras.optimizers.SGD\n }\n\n # Get optimizer and instantiate it.\n if params[\"{}_optimizer\".format(scope)] == \"Adam\":\n optimizer = optimizers[params[\"{}_optimizer\".format(scope)]](\n learning_rate=params[\"{}_learning_rate\".format(scope)],\n beta_1=params[\"{}_adam_beta1\".format(scope)],\n beta_2=params[\"{}_adam_beta2\".format(scope)],\n epsilon=params[\"{}_adam_epsilon\".format(scope)],\n name=\"{}_{}_optimizer\".format(\n scope, params[\"{}_optimizer\".format(scope)].lower()\n )\n )\n else:\n optimizer = optimizers[params[\"{}_optimizer\".format(scope)]](\n learning_rate=params[\"{}_learning_rate\".format(scope)],\n name=\"{}_{}_optimizer\".format(\n scope, params[\"{}_optimizer\".format(scope)].lower()\n )\n )\n\n return optimizer", "def create_optimizer(optimizer_name, model, config):\n if optimizer_name == 'adadelta':\n return torch.optim.Adadelta(model.parameters(),\n lr=config['adadelta_lr'],\n rho=config['adadelta_rho'],\n weight_decay=config['adadelta_weight_decay'],\n eps=config['adadelta_eps'])\n else:\n raise Exception('Optimizer \\'{}\\' not supported.'.format(optimizer_name))", "def create_optimizer(init_lr, num_train_steps, num_warmup_steps):\n # Implements linear decay of the learning rate.\n learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=init_lr,\n decay_steps=num_train_steps,\n end_learning_rate=0.0)\n if num_warmup_steps:\n learning_rate_fn = WarmUp(initial_learning_rate=init_lr,\n decay_schedule_fn=learning_rate_fn,\n warmup_steps=num_warmup_steps)\n optimizer = AdamWeightDecay(\n learning_rate=learning_rate_fn,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[LAYER_NORM_NAME, 'bias'])\n return optimizer", "def build_optimizer(opt_config, learning_rate):\n if opt_config.opt_method == 'SGD':\n print('Using SGD as the optimizer', file=sys.stderr)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n elif opt_config.opt_method == 'Adam':\n print('Using Adam as the optimizer', file=sys.stderr)\n optimizer = tf.train.AdamOptimizer(\n learning_rate, beta1=opt_config.adam_beta1,\n beta2=opt_config.adam_beta2, epsilon=opt_config.adam_epsilon\n )\n else:\n raise ValueError(\n 'Unknown optimization method {0}!'.format(opt_config.opt_method))\n return optimizer", "def get_updater(optimizer):\n return Updater(optimizer)", "def make_optimizer(self, train_var_filter):\n # According from the prototxt in Caffe implement, learning rate must multiply by 10.0 in pyramid module\n 
fc_list = ['conv5_3_pool1_conv', 'conv5_3_pool2_conv', 'conv5_3_pool3_conv', 'conv5_3_pool6_conv', 'conv6',\n 'conv5_4']\n all_trainable = [v for v in tf.trainable_variables() if\n ('beta' not in v.name and 'gamma' not in v.name) or True]\n fc_trainable = [v for v in all_trainable if v.name.split('/')[0] in fc_list]\n conv_trainable = [v for v in all_trainable if v.name.split('/')[0] not in fc_list] # lr * 1.0\n fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name] # lr * 10.0\n fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name] # lr * 20.0\n assert (len(all_trainable) == len(fc_trainable) + len(conv_trainable))\n assert (len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))\n\n with tf.control_dependencies(self.update_ops):\n opt_conv = tf.train.MomentumOptimizer(self.lr_op, self.momentum)\n opt_fc_w = tf.train.MomentumOptimizer(self.lr_op * 10.0, self.momentum)\n opt_fc_b = tf.train.MomentumOptimizer(self.lr_op * 20.0, self.momentum)\n\n grads = tf.gradients(self.loss, conv_trainable + fc_w_trainable + fc_b_trainable)\n grads_conv = grads[:len(conv_trainable)]\n grads_fc_w = grads[len(conv_trainable): (len(conv_trainable) + len(fc_w_trainable))]\n grads_fc_b = grads[(len(conv_trainable) + len(fc_w_trainable)):]\n\n train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable), global_step=self.global_step)\n train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))\n train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))\n\n self.optimizer = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)", "def initialize_optimizer(model, args):\n parameters = [p for p in model.parameters() if p.requires_grad]\n if args.optimizer == 'sgd':\n optimizer = optim.SGD(parameters, args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adam':\n optimizer = optim.Adam(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adamax':\n optimizer = optim.Adamax(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adagrad':\n optimizer = optim.Adagrad(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True)\n return optimizer, scheduler", "def compile(self, optimizer: Union[IOpContainer, Type[IGradientDescent]]):\n pass", "def __configure_optimizer(self, learning_rate):\n if self.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=self.adadelta_rho,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=self.adagrad_initial_accumulator_value)\n elif self.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=self.adam_beta1,\n beta2=self.adam_beta2,\n epsilon=self.opt_epsilon)\n elif self.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=self.ftrl_learning_rate_power,\n initial_accumulator_value=self.ftrl_initial_accumulator_value,\n l1_regularization_strength=self.ftrl_l1,\n l2_regularization_strength=self.ftrl_l2)\n elif self.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=self.momentum,\n name='Momentum')\n elif self.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=self.rmsprop_decay,\n momentum=self.rmsprop_momentum,\n 
epsilon=self.opt_epsilon)\n elif self.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', self.optimizer)\n return optimizer", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer', optimizer)", "def __init__(self, api_config):\n AbstractOptimizer.__init__(self, api_config)\n \n api_space = BoEI.api_manipulator(api_config) # used for GPyOpt initialization\n\n self.space_x = JointSpace(api_config) # used for warping & unwarping of new suggestions & observations\n\n self.hasCat, self.cat_vec = BoEI.is_cat(api_config)\n \n self.dim = len(self.space_x.get_bounds())\n\n self.objective = GPyOpt.core.task.SingleObjective(None)\n\n self.space = GPyOpt.Design_space(api_space)\n \n self.model = GPyOpt.models.GPModel(optimize_restarts=5,verbose=False)\n \n self.aquisition_optimizer = GPyOpt.optimization.AcquisitionOptimizer(self.space)\n \n \n self.aquisition = AcquisitionEI(self.model, self.space, optimizer=self.aquisition_optimizer, cost_withGradients=None)\n \n self.batch_size = None", "def wrap_optimizer(self, optimizer: Any) -> Any:\n if not self.env.managed_training:\n return optimizer\n\n self.optimizer_initialized = True\n if self.distributed.size == 1:\n return optimizer\n\n check.check_false(\n isinstance(optimizer, str),\n \"Please specify an optimizer object instead of using a string name.\",\n )\n\n hvd.require_horovod_type(\"tensorflow\", \"EstimatorTrialContext.wrap_optimizer was called.\")\n\n # The signature of our horovod optimizer changed after we rebased onto 0.21.\n hvd_sig = inspect.signature(hvd.DistributedOptimizer)\n horovod_kwargs = {\n \"compression\": hvd.compression.Compression.fp16\n if self.fp16_compression\n else hvd.compression.Compression.none,\n \"average_aggregated_gradients\": self.average_aggregated_gradients,\n }\n if \"aggregation_frequency\" in hvd_sig.parameters:\n horovod_kwargs[\"aggregation_frequency\"] = self.aggregation_frequency\n else:\n horovod_kwargs[\"backward_passes_per_step\"] = self.aggregation_frequency\n\n optimizer = hvd.DistributedOptimizer(optimizer, **horovod_kwargs)\n logging.debug(\"Initialized optimizer for distributed and optimized parallel training.\")\n return optimizer", "def init_optimizer(network, config):\n # define optimizer and loss\n if config.optimizer == 'adadelta':\n opt = torch.optim.Adadelta(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n elif config.optimizer == 'adam':\n opt = torch.optim.Adam(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n elif config.optimizer == 'rmsprop':\n opt = torch.optim.RMSprop(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n return opt", "def _initialize_trainer(self):\n self.cost = mse(0., 0.)\n for task_id in self.task_ids.keys():\n self.cost += self.model.get_layer(task_id + '-loss')\n\n opt = Optimizer(self.cost)\n self.optimizer = opt.get_adagrad(self.learning_rate)", "def _init_optimizing(self):\n try:\n model_config = self._conf.get(PredictConstance.BASE_CONFIG,\n PredictConstance.MODEL_CONFIG)\n conf = configparser.ConfigParser()\n conf.read(model_config)\n self._optimizing = optimizing.Optimizing(conf,log_path = self.xeasy_log_path)\n if self._optimizing.init() == runstatus.RunStatus.SUCC:\n return True\n except Exception as err:\n self.managerlogger.logger.error(\"init optimizing error: %s\" % err)\n self.errorlogger.logger.error(\"init optimizing error: \\n 
%s\" % traceback.format_exc())\n return False", "def _create_networks_and_optimizer(self):\n self.policy_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self.target_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self._update_target_net()\n \n self.optimizer = optim.Adam(self.policy_net.parameters(), \n lr=self.lr, eps=1e-7)", "def configure_optimizer(learning_rate):\n\tif train_config['optimizer'] == 'adadelta':\n\t\toptimizer = tf.train.AdadeltaOptimizer(learning_rate,\n\t\t rho=train_config['adadelta_rho'],\n\t\t epsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'dadgrad':\n\t\toptimizer = tf.train.AdagradDAOptimizer(\n\t\t\tlearning_rate,\n\t\t\tinitial_gradient_squared_accumulator_value=train_config['adagrad_initial_accumulator_value'])\n\telif train_config['optimizer'] == 'adam':\n\t\toptimizer = tf.train.AdamOptimizer(\n\t\t\tlearning_rate,\n\t\t\tbeta1=train_config['adam_beta1'],\n\t\t\tbeta2=train_config['adam_beta2'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'ftrl':\n\t\toptimizer = tf.train.FtrlOptimizer(\n\t\t\tlearning_rate,\n\t\t\tlearning_rate_power=train_config['ftrl_learning_rate_power'],\n\t\t\tinitial_accumulator_value=train_config['ftrl_initial_accumulator_value'],\n\t\t\tl1_regularization_strength=train_config['ftrl_l1'],\n\t\t\tl2_regularization_strength=train_config['ftrl_l2'])\n\telif train_config['optimizer'] == 'momentum':\n\t\toptimizer = tf.train.MomentumOptimizer(\n\t\t\tlearning_rate,\n\t\t\tmomentum=train_config['momentum'],\n\t\t\tname='Momentum')\n\telif train_config['optimizer'] == 'rmsprop':\n\t\toptimizer = tf.train.RMSPropOptimizer(\n\t\t\tlearning_rate,\n\t\t\tdecay=train_config['rmsprop_decay'],\n\t\t\tmomentum=train_config['rmsprop_momentum'],\n\t\t\tepsilon=train_config['opt_epsilon'])\n\telif train_config['optimizer'] == 'sgd':\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\telse:\n\t\traise ValueError('Optimizer [%s] was not recognized' % train_config['optimizer'])\n\treturn optimizer", "def build_optimizer(\n self, lr: Union[tf.keras.optimizers.schedules.LearningRateSchedule,\n float]):\n\n optimizer_dict = self._optimizer_config.as_dict()\n optimizer_dict['learning_rate'] = lr\n\n optimizer = OPTIMIZERS_CLS[self._optimizer_type](**optimizer_dict)\n return optimizer", "def fetch_optimizer(args, model):\n optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)\n\n scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps + 100,\n pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')\n\n return optimizer, scheduler", "def make_optimizer(model, lr, opt, weight_decay):\n optimizers = {\n 'adam': optim.Adam,\n 'adamax': optim.Adamax,\n 'rmsprop': optim.RMSprop,\n }\n\n optimizer = optimizers[opt](model.parameters(), lr=lr,\n weight_decay=weight_decay)\n\n return optimizer", "def configure_optimizer(learning_rate):\n if hp.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=hp.adadelta_rho,\n epsilon=hp.opt_epsilon)\n elif hp.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=hp.adagrad_initial_accumulator_value)\n elif hp.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=hp.adam_beta1,\n beta2=hp.adam_beta2,\n epsilon=hp.opt_epsilon)\n elif hp.optimizer == 'ftrl':\n optimizer = 
tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=hp.ftrl_learning_rate_power,\n initial_accumulator_value=hp.ftrl_initial_accumulator_value,\n l1_regularization_strength=hp.ftrl_l1,\n l2_regularization_strength=hp.ftrl_l2)\n elif hp.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=hp.momentum,\n name='Momentum')\n elif hp.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=hp.rmsprop_decay,\n momentum=hp.rmsprop_momentum,\n epsilon=hp.opt_epsilon)\n elif hp.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', hp.optimizer)\n return optimizer", "def _load_state_dict(optimizer, state: dict) -> None:\n if is_scheduler(optimizer):\n optimizer.load_state_dict(state[\"scheduler\"])\n optimizer.optimizer.load_state_dict(state[\"optimizer\"])\n else:\n optimizer.load_state_dict(state)", "def setup_optimiser(self):\n self.optimiser = ScheduledOptim(\n optim.Adam(\n filter(lambda x: x.requires_grad, self.model.parameters()),\n betas=(0.9, 0.98), eps=1e-09, lr=self.opt.learning_rate),\n self.opt.d_model, self.opt.n_warmup_steps)\n if self.opt.verbose:\n print(\"[Info] optimiser configured.\")", "def dist_optimizer(config, optimizer):\n build_strategy, exec_strategy = create_strategy(config)\n\n dist_strategy = DistributedStrategy()\n dist_strategy.execution_strategy = exec_strategy\n dist_strategy.build_strategy = build_strategy\n\n dist_strategy.nccl_comm_num = 1\n dist_strategy.fuse_all_reduce_ops = True\n dist_strategy.fuse_grad_size_in_MB = 16\n optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)\n\n return optimizer", "def setup_optims(self):\n lr = self.train_config['lr']\n b1 = self.train_config['b1']\n b2 = self.train_config['b2']\n weight_decay = self.train_config['weight_decay']\n self.opt = torch.optim.Adam(self.network.parameters(), lr=lr, betas=(b1, b2),\n weight_decay=weight_decay)", "def _create_train_op(self):\n self.lr = self.learning_rate\n # global_step = tf.train.get_or_create_global_step()\n learning_rate = tf.constant(value=self.learning_rate, shape=[], dtype=tf.float32)\n learning_rate =tf.train.exponential_decay(learning_rate,self.global_step,2*self.num_warm_up,0.96,staircase=True,name=\"exponential_decay\")\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if self.num_warm_up:\n global_steps_int = tf.cast(self.global_step, tf.int32)\n warmup_steps_int = tf.constant(self.num_warm_up, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = self.learning_rate * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n self.current_learning_rate = learning_rate\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.lr)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.lr)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.lr)\n elif self.optim_type == \"bert\":\n self.optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9,\n beta_2=0.999, epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n\n self.logger.info(\"applying optimize %s\" % self.optim_type)\n if self.clip_weight:\n # clip_weight\n tvars = tf.trainable_variables()\n grads = tf.gradients(self.loss, tvars)\n grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.max_norm_grad)\n grad_var_pairs = zip(grads, tvars)\n train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad', global_step=self.global_step)\n new_global_step = self.global_step + 1\n train_op = tf.group(train_op, [self.global_step.assign(new_global_step)])\n self.train_op = train_op\n else:\n self.train_op = self.optimizer.minimize(self.loss, global_step=self.global_step)", "def initialize(self, *args):\n return _SALOMERuntime.OptimizerAlgASync_initialize(self, *args)", "def define_model_and_optimizer(self):\r\n print(\"* Defining model and optimizer.\", flush=True)\r\n job_dir = self.C.job_dir\r\n\r\n if self.C.restart:\r\n print(\"-- Loading model from previous saved state.\", flush=True)\r\n self.restart_epoch = util.get_restart_epoch()\r\n self.model = torch.load(f\"{job_dir}model_restart_{self.restart_epoch}.pth\")\r\n\r\n print(\r\n f\"-- Backing up as \"\r\n f\"{job_dir}model_restart_{self.restart_epoch}_restarted.pth.\",\r\n flush=True,\r\n )\r\n shutil.copyfile(\r\n f\"{job_dir}model_restart_{self.restart_epoch}.pth\",\r\n f\"{job_dir}model_restart_{self.restart_epoch}_restarted.pth\",\r\n )\r\n\r\n else:\r\n print(\"-- Initializing model from scratch.\", flush=True)\r\n self.model = models.initialize_model()\r\n\r\n self.restart_epoch = 0\r\n\r\n start_epoch = self.restart_epoch + 1\r\n end_epoch = start_epoch + self.C.epochs\r\n\r\n print(\"-- Defining optimizer.\", flush=True)\r\n self.optimizer = torch.optim.Adam(\r\n params=self.model.parameters(),\r\n lr=self.C.init_lr,\r\n weight_decay=self.C.weight_decay,\r\n )\r\n\r\n return start_epoch, end_epoch", "def _create_algorithm(algo_name, algo_options, origin):\n if origin == \"nlopt\":\n algo = pg.algorithm(pg.nlopt(solver=algo_name))\n for option, val in algo_options.items():\n setattr(algo.extract(pg.nlopt), option, val)\n elif origin == \"pygmo\":\n pygmo_uda = 
getattr(pg, algo_name)\n algo_options = algo_options.copy()\n if \"popsize\" in algo_options:\n del algo_options[\"popsize\"]\n algo = pg.algorithm(pygmo_uda(**algo_options))\n\n return algo", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def init_optimizer(self, state_dict=None, use_gpu=True):\r\n param_optimizer = list(self.network.named_parameters())\r\n\r\n # There seems to be something that we can't\r\n param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]\r\n\r\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\r\n optimizer_grouped_parameters = [\r\n {'params': [p for n, p in param_optimizer\r\n if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\r\n {'params': [p for n, p in param_optimizer\r\n if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]\r\n\r\n num_train_optimization_steps = int(\r\n self.args.num_train_examples / self.args.batch_size / self.args.gradient_accumulation_steps) \\\r\n * self.args.num_epochs\r\n\r\n self.optimizer = AdamW(optimizer_grouped_parameters,\r\n lr=self.args.learning_rate)\r\n self.scheduler = WarmupLinearSchedule(self.optimizer,\r\n warmup_steps=self.args.warmup_steps,\r\n t_total=num_train_optimization_steps)\r\n\r\n if state_dict is not None:\r\n self.optimizer.load_state_dict(state_dict)\r\n # FIXME: temp soln - https://github.com/pytorch/pytorch/issues/2830\r\n if use_gpu:\r\n for state in self.optimizer.state.values():\r\n for k, v in state.items():\r\n if isinstance(v, torch.Tensor):\r\n state[k] = v.cuda()", "def load(self, path):\n file = open(path, 'rb')\n state = pickle.load(file)\n\n self.opt_state = optimizers.pack_optimizer_state(state)", "def _build_optimizers(self):\r\n self._optimize_ops = []\r\n all_trainable_variables = tf.trainable_variables()\r\n all_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n all_reg_losses = tf.losses.get_regularization_losses()\r\n for spec in self._learning_schedule:\r\n optimize_ops = []\r\n update_ops = []\r\n loss_terms = spec['loss_terms_to_optimize']\r\n reg_losses = []\r\n assert isinstance(loss_terms, dict)\r\n for loss_term_key, prefixes in loss_terms.items():\r\n assert loss_term_key in self.loss_terms['train'].keys()\r\n variables_to_train = []\r\n for 
prefix in prefixes:\r\n variables_to_train += [\r\n v for v in all_trainable_variables\r\n if v.name.startswith(prefix)\r\n ]\r\n update_ops += [\r\n o for o in all_update_ops\r\n if o.name.startswith(prefix)\r\n ]\r\n reg_losses += [\r\n l for l in all_reg_losses\r\n if l.name.startswith(prefix)\r\n ]\r\n\r\n optimizer_class = tf.train.AdamOptimizer\r\n optimizer = optimizer_class(\r\n learning_rate=self.learning_rate_multiplier * spec['learning_rate'],\r\n # beta1=0.9,\r\n # beta2=0.999,\r\n )\r\n final_loss = self.loss_terms['train'][loss_term_key]\r\n if len(reg_losses) > 0:\r\n final_loss += tf.reduce_sum(reg_losses)\r\n with tf.control_dependencies(update_ops):\r\n gradients, variables = zip(*optimizer.compute_gradients(\r\n loss=final_loss,\r\n var_list=variables_to_train,\r\n aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N,\r\n ))\r\n # gradients, _ = tf.clip_by_global_norm(gradients, 5.0) # TODO: generalize\r\n optimize_op = optimizer.apply_gradients(zip(gradients, variables))\r\n optimize_ops.append(optimize_op)\r\n self._optimize_ops.append(optimize_ops)\r\n logger.info('Built optimizer for: %s' % ', '.join(loss_terms.keys()))", "def create_train_op(self, model, learning_rate):\n if self.optim_type == 'adagrad':\n optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'adadelta':\n optimizer = torch.optim.Adadelta(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'rprop':\n optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=self.args.momentum,\n weight_decay=self.args.weight_decay)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n return optimizer", "def init_optimizer(self, optimizer_params, force_init=False):\n optimizer_backbone = DistributedOptimizer(\n mx.optimizer.SGD(**optimizer_params))\n self.backbone_module.init_optimizer('local',\n optimizer_backbone,\n force_init=force_init)", "def __init__(self,\n weight_decay,\n global_step,\n max_matrix_size=768,\n gbar_decay=0.0,\n gbar_weight=1.0,\n mat_gbar_decay=1.0,\n mat_gbar_weight=1.0,\n learning_rate=1.0,\n svd_interval=1,\n precond_update_interval=1,\n epsilon=1e-4,\n alpha=0.5,\n use_iterative_root=False,\n use_locking=False,\n name=\"ShampooW\"):\n super(ShampooWOptimizer, self).__init__(\n weight_decay,\n global_step=global_step,\n max_matrix_size=max_matrix_size,\n gbar_decay=gbar_decay,\n gbar_weight=gbar_weight,\n mat_gbar_decay=mat_gbar_weight,\n learning_rate=learning_rate,\n svd_interval=svd_interval,\n precond_update_interval=precond_update_interval,\n epsilon=epsilon,\n alpha=alpha,\n use_iterative_root=use_iterative_root,\n use_locking=use_locking,\n name=name)", "def get_optimizer(module, recipe):\n\n # <module>, \"stages__*__optimizer\"\n return get_instance(module.parameters(), **recipe)", "def _set_optimizer(self):\n\n if self.optimizer_name == 'Adam':\n self.optimizer = optim.Adam(self.net.parameters(),\n lr=self.learning_rate,\n betas=self.betas,\n eps=1e-8,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n 
momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD_Nesterov':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay,\n nesterov=True)\n elif self.optimizer_name == 'RMSprop':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'Adagrad':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n weight_decay=self.weight_decay)\n else:\n print(\"Optimizer '\" + self.optimizer_name + \"' not implemented.\")", "def optimizerFactory(hybridModel, params):\n\n if params['optim']['name'] == 'adam':\n return torch.optim.Adam(\n hybridModel.parameters(),lr=params['optim']['lr'], \n betas=(0.9, 0.999), eps=1e-08, \n weight_decay=params['optim']['weight_decay'], amsgrad=False\n )\n elif params['optim']['name'] == 'sgd': \n return torch.optim.SGD(\n hybridModel.parameters(), lr=params['optim']['lr'], \n momentum=params['optim']['momentum'], weight_decay=params['optim']['weight_decay']\n )\n \n else:\n raise NotImplemented(f\"Optimizer {params['optim']['name']} not implemented\")", "def build_optimizer(optimizer_config, params, name=None):\n\n if optimizer_config.name == 'rms_prop_optimizer':\n\n optimizer = paddle.optimizer.RMSProp(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n rho=optimizer_config.decay,\n momentum=optimizer_config.momentum_optimizer_value,\n epsilon=optimizer_config.epsilon,\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer_config.name =='momentum_optimizer':\n\n optimizer = paddle.optimizer.SGD(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer_config.name =='adam_optimizer':\n\n optimizer = paddle.optimizer.Adam(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer is None:\n raise ValueError('Optimizer %s not supported.' 
% optimizer_config.name)\n\n if optimizer_config.use_moving_average:\n raise ValueError('paddle don\\'t support moving average')\n if name is None:\n # assign a name to optimizer for checkpoint system\n optimizer.name = optimizer_config.name\n else:\n optimizer.name = name\n return optimizer", "def __init__(self,\n opt,\n num_worker,\n ea_custom_getter,\n communication_period=10,\n moving_rate=None,\n rho=None,\n use_locking=True,\n name='ElasticAverageOptimizer'):\n super(ElasticAverageOptimizer, self).__init__(use_locking, name)\n self._opt = opt\n self._num_worker = num_worker\n self._period = communication_period\n self._local_map = ea_custom_getter._local_map\n self._global_map = ea_custom_getter._global_map\n\n if moving_rate is None:\n self._moving_rate = self.BETA / communication_period / num_worker\n else:\n self._moving_rate = moving_rate\n if rho is None:\n self._rho = self._moving_rate / self._opt._learning_rate\n else:\n self._rho = rho\n\n self._local_step = variable_scope.get_variable(\n initializer=0,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES],\n name='local_step')\n self._opt._prepare()", "def _create_optimizer(\n config: Union[None, Mapping] = None\n) -> keras.optimizers.Optimizer:\n\n if config is None:\n return keras.optimizers.Adam()\n\n algorithm = config['algorithm'].lower()\n if algorithm == 'adam':\n return keras.optimizers.Adam()\n elif algorithm == 'sgd':\n return keras.optimizers.SGD(\n momentum=config.get('momentum', 0.),\n nesterov=config.get('nesterov', False)\n )\n elif algorithm == 'radam':\n return tfa.optimizers.RectifiedAdam()\n else:\n raise RuntimeError(\n 'Unsupported optimizer \"{}\".'.format(config['algorithm'])\n )", "def setup(self, opt):\n if self.isTrain:\n self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n if not self.isTrain or opt.continue_train:\n load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch\n self.load_networks(load_suffix)\n self.print_networks(opt.verbose)", "def init_optimizer_for_pruning(cls, optimizer):\n assert (cls.__optimizer is None), \"ASP has initialized optimizer already.\"\n assert (cls.__calculate_mask is not None), \"Called ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning.\"\n\n # store pointer to original optimizer step method\n cls.__optimizer = optimizer\n cls.__optimizer.__step = optimizer.step\n\n def __step(opt_self, *args, **kwargs):\n # prune gradients before step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.grad.mul_(mask)\n # call original optimizer step method\n rval = opt_self.__step(*args, **kwargs)\n # prune parameters after step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.mul_(mask)\n return rval\n cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n 
learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def configure_optimizer(learning_rate):\r\n if FLAGS.optimizer == 'adadelta':\r\n optimizer = tf.train.AdadeltaOptimizer(learning_rate, \r\n rho=FLAGS.adadelta_rho,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'adagrad':\r\n optimizer = tf.train.AdagradOptimizer(learning_rate,\r\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\r\n elif FLAGS.optimizer == 'adam':\r\n optimizer = tf.train.AdamOptimizer(learning_rate,\r\n beta1=FLAGS.adam_beta1,beta2=FLAGS.adam_beta2,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'ftrl':\r\n optimizer = tf.train.FtrlOptimizer(learning_rate,learning_rate_power=FLAGS.ftrl_learning_rate_power,\r\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\r\n l1_regularization_strength=FLAGS.ftrl_l1,l2_regularization_strength=FLAGS.ftrl_l2)\r\n elif FLAGS.optimizer == 'momentum':\r\n optimizer = tf.train.MomentumOptimizer(learning_rate,\r\n momentum=FLAGS.momentum,name='Momentum')\r\n elif FLAGS.optimizer == 'rmsprop':\r\n optimizer = tf.train.RMSPropOptimizer(learning_rate,decay=FLAGS.rmsprop_decay,\r\n momentum=FLAGS.rmsprop_momentum,epsilon=FLAGS.opt_epsilon)\r\n elif FLAGS.optimizer == 'sgd':\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)\r\n else:\r\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\r\n return optimizer", "def _configure_optimizer(learning_rate):\n if FLAGS.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=FLAGS.adadelta_rho,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)\n elif FLAGS.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=FLAGS.adam_beta1,\n beta2=FLAGS.adam_beta2,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=FLAGS.ftrl_learning_rate_power,\n initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,\n l1_regularization_strength=FLAGS.ftrl_l1,\n l2_regularization_strength=FLAGS.ftrl_l2)\n elif FLAGS.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=FLAGS.momentum,\n use_nesterov=True,\n name='Momentum')\n elif FLAGS.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.momentum,\n epsilon=FLAGS.opt_epsilon)\n elif FLAGS.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)\n return optimizer", "def set_optimizer(self, probe):\n if 'weight_decay' in 
self.args['probe_training']:\n weight_decay = self.args['probe_training']['weight_decay']\n else:\n weight_decay = 0\n if 'scheduler_patience' in self.args['probe_training']:\n scheduler_patience = self.args['probe_training']['scheduler_patience']\n else:\n scheduler_patience = 0\n \n learning_rate = 0.001 if not 'learning_rate' in self.args['probe_training'] else\\\n self.args['probe_training']['learning_rate']\n \n scheduler_factor = 0.5 if not 'scheduler_factor' in self.args['probe_training'] else\\\n self.args['probe_training']['scheduler_factor']\n\n self.optimizer = optim.Adam(probe.parameters(), lr=learning_rate, weight_decay=weight_decay)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n mode='min',\n factor=scheduler_factor,\n patience=scheduler_patience)", "def set_optimizer(self, probe):\n if 'weight_decay' in self.args['probe_training']:\n weight_decay = self.args['probe_training']['weight_decay']\n else:\n weight_decay = 0\n if 'scheduler_patience' in self.args['probe_training']:\n scheduler_patience = self.args['probe_training']['scheduler_patience']\n else:\n scheduler_patience = 0\n \n learning_rate = 0.001 if not 'learning_rate' in self.args['probe_training'] else\\\n self.args['probe_training']['learning_rate']\n \n scheduler_factor = 0.5 if not 'scheduler_factor' in self.args['probe_training'] else\\\n self.args['probe_training']['scheduler_factor']\n\n self.optimizer = optim.Adam(probe.parameters(), lr=learning_rate, weight_decay=weight_decay)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n mode='min',\n factor=scheduler_factor,\n patience=scheduler_patience)", "def getOptimizer():\n if configuration['others']['verbose']:\n sys.stdout.write(reports.seperator + reports.tabs +\n 'Optimizer : Adagrad, learning rate = {0}'.format(configuration['mlp']['lr'])\n + reports.seperator)\n return optimizers.Adagrad(lr=configuration['mlp']['lr'], epsilon=None, decay=0.0)", "def configure_optimizers(self):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.optimizer == \"adamw\":\n optimizer = AdamW(optimizer_grouped_parameters,\n betas=(0.9, 0.98), # according to RoBERTa paper\n lr=self.args.lr,\n eps=self.args.adam_epsilon,)\n elif self.optimizer == \"torch.adam\":\n optimizer = torch.optim.AdamW(optimizer_grouped_parameters,\n lr=self.args.lr,\n eps=self.args.adam_epsilon,\n weight_decay=self.args.weight_decay)\n else:\n raise ValueError(\"Optimizer type does not exist.\")\n num_gpus = len([x for x in str(self.args.gpus).split(\",\") if x.strip()])\n t_total = (len(self.train_dataloader()) // (self.args.accumulate_grad_batches * num_gpus) + 1) * self.args.max_epochs\n warmup_steps = int(self.args.warmup_proportion * t_total)\n if self.args.lr_scheduler == \"onecycle\":\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer, max_lr=self.args.lr, pct_start=float(warmup_steps/t_total),\n final_div_factor=self.args.final_div_factor,\n total_steps=t_total, anneal_strategy='linear')\n elif self.args.lr_scheduler == \"linear\":\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)\n elif self.args.lr_scheulder == \"polydecay\":\n if 
self.args.lr_mini == -1:\n lr_mini = self.args.lr / self.args.polydecay_ratio\n else:\n lr_mini = self.args.lr_mini\n scheduler = get_polynomial_decay_schedule_with_warmup(optimizer, warmup_steps, t_total, lr_end=lr_mini)\n else:\n raise ValueError\n return [optimizer], [{\"scheduler\": scheduler, \"interval\": \"step\"}]", "def _make_optimizer(use_adam, learning_rate):\n if use_adam:\n ret = tf.train.AdamOptimizer(learning_rate=learning_rate)\n else:\n ret = tf.train.MomentumOptimizer(learning_rate=learning_rate,\n momentum=0.95,\n use_nesterov=True)\n return ret", "def __init__(self, state_dim, action_dim, learning_rate, weight_decay):\n self.dynamics_net = ForwardModel(state_dim, action_dim)\n self.rewards_net = RewardModel(state_dim, action_dim)\n self.done_net = RewardModel(state_dim, action_dim)\n\n self.dyn_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.reward_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.done_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)", "def main():\n options = get_options()\n dataset, test, fs = get_dataset(options)\n\n def eval_all(folder):\n \"\"\"evaluates all optimizers and all models on given dataset, and saves\n info pictures to folder\n\n Args:\n folder: folder to save results\n \"\"\"\n optimizers = [\n tf.keras.optimizers.Adadelta(learning_rate=0.01),\n tf.keras.optimizers.Adagrad(learning_rate=0.002),\n tf.keras.optimizers.Adam(learning_rate=0.0001),\n tf.keras.optimizers.Adamax(learning_rate=0.0005),\n tf.keras.optimizers.Ftrl(learning_rate=0.002),\n tf.keras.optimizers.Nadam(learning_rate=0.001),\n tf.keras.optimizers.RMSprop(learning_rate=0.0005),\n tf.keras.optimizers.SGD(learning_rate=0.003),\n ]\n\n epochs = [\n 500, 120, 80, 150, 300, 60, 100, 500\n ]\n\n biased_randomized = [\n (models.DefaultModel, False),\n (models.BiasedModel, False),\n (models.NeuralModel, False),\n (models.DefaultModel, True),\n (models.BiasedModel, True),\n (models.NeuralModel, True),\n ]\n\n for optimizer, n_epochs in zip(optimizers, epochs):\n for model, rndmz in biased_randomized:\n eval_optimizer(folder,\n model,\n optimizer,\n n_epochs,\n rndmz)\n\n def eval_complicated(folder):\n optimizers = [\n tf.keras.optimizers.Adadelta,\n tf.keras.optimizers.Adagrad,\n tf.keras.optimizers.Adam,\n tf.keras.optimizers.Adamax,\n tf.keras.optimizers.Ftrl,\n tf.keras.optimizers.Nadam,\n tf.keras.optimizers.RMSprop,\n tf.keras.optimizers.SGD,\n ]\n\n type_eph_lrate = [\n (models.Deep2Hidden, 15, 0.00003),\n (models.Deep11Hidden, 15, 0.00003)\n ]\n\n for opt in optimizers:\n for model, epochs, lrate in type_eph_lrate:\n eval_optimizer(folder,\n model,\n opt(learning_rate=lrate),\n epochs,\n True)\n\n def eval_big(folder):\n optimizers_filter = [\n (tf.keras.optimizers.Adadelta(learning_rate=1e-3), 200),\n (tf.keras.optimizers.Adagrad(learning_rate=1e-3), 200),\n (tf.keras.optimizers.SGD(learning_rate=1e-3), 200)\n ]\n optimizers_layer = [\n (tf.keras.optimizers.Adam(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Adamax(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Ftrl(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Nadam(learning_rate=1e-5), 30),\n (tf.keras.optimizers.RMSprop(learning_rate=1e-5), 30)\n ]\n optimizers_deep = [\n (tf.keras.optimizers.Adam(learning_rate=3e-6), 15),\n (tf.keras.optimizers.Adamax(learning_rate=3e-6), 15),\n (tf.keras.optimizers.RMSprop(learning_rate=3e-6), 15)\n ]\n mdls = [\n 
models.DefaultModel,\n models.NeuralModel,\n models.Deep1Hidden\n ]\n\n for (opt, model) in zip((optimizers_filter,\n optimizers_layer,\n optimizers_deep), mdls):\n for (optimizer, epochs) in opt:\n randomize = False if model is models.DefaultModel else True\n eval_optimizer(folder,\n model,\n optimizer,\n epochs,\n randomize)\n\n def eval_optimizer(folder,\n model, optimizer, epochs, randomize):\n \"\"\"Evaluates given model on given dataset\n\n Evaluates model on given dataset, optimizes result by optimizer, and saves\n info image to given folder\n\n Args:\n folder: folder to save info images\n model: tf.keras.Model model for evaluation\n optimizer: tf.keras optimizer\n epochs (int): epochs of training\n randomize (bool): tandomize initial weights and biases\n\n \"\"\"\n class2name = {\n models.DefaultModel: \"default\",\n models.BiasedModel: \"biased\",\n models.NeuralModel: \"neural\",\n models.NeuralSTD: \"neuralSTD\",\n models.Deep1Hidden: \"deep1h\",\n models.Deep2Hidden: \"deep2h\",\n models.Deep11Hidden: \"deep1_1\"\n }\n\n # prepare for training\n layer_len = len(dataset.take(1).as_numpy_iterator().next()[0][0])\n optimizer_conf = optimizer.get_config()\n fname = \"/%s_%s_%deph_%.5flrate_%s\" % \\\n (class2name[model],\n optimizer_conf[\"name\"],\n epochs,\n optimizer_conf[\"learning_rate\"],\n \"rnd\" if randomize else \"nornd\")\n\n pic_name = folder + fname + \".png\"\n file_name = folder + \"/models\" + fname + \".model\"\n model_obj = model(layer_len, randomize)\n model_obj.compile(optimizer=optimizer, loss=models.SimpleLoss())\n\n # prepare data from test dataset for result visualization\n train_sample = None\n no_train_sample = None\n samples = []\n labels = []\n for features, label in test.as_numpy_iterator():\n samples.append(features)\n labels.append(label)\n if train_sample is None and label == 1:\n train_sample = features\n if no_train_sample is None and label == 0:\n no_train_sample = features\n samples = np.array(samples)\n labels = np.array(labels, dtype=np.bool)\n # save untrained classification, for result visualization\n untrained_predicted_labels = model_obj(samples).numpy()\n # train model\n history = model_obj.fit(x=dataset, epochs=epochs)\n train_filtered = model_obj.filter_single(train_sample)\n no_train_filtered = model_obj.filter_single(no_train_sample)\n predicted_labels = model_obj(samples).numpy()\n\n # result visualization and saving\n fig = plt.figure(figsize=(15., 7.))\n loss_ax = fig.add_subplot(3, 1, 1)\n loss_ax.set_title(\"ход обучения\")\n loss_ax.set_xlabel(\"эпоха\")\n loss_ax.set_ylabel(\"ф-я потерь\")\n sig_untrained_ax = fig.add_subplot(3, 2, 3)\n sig_untrained_ax.set_title(\"примеры сигналов\")\n sig_untrained_ax.set_xlabel(\"время, сек\")\n sig_untrained_ax.set_ylabel(\"ускорение, мкм/сек^2\")\n sig_trained_ax = fig.add_subplot(3, 2, 4)\n sig_trained_ax.set_title(\"отфильтрованные сигналы\")\n sig_trained_ax.set_xlabel(\"время, сек\")\n sig_trained_ax.set_ylabel(\"ускорение, мкм/сек^2\")\n # sig_trained_ax.set_ylim(-1, 1)\n label_untrained_ax = fig.add_subplot(3, 2, 5)\n label_untrained_ax.set_title(\"классификация необученной моделью\")\n label_untrained_ax.set_xlabel(\"вероятность, что сигнал от поезда\")\n label_trained_ax = fig.add_subplot(3, 2, 6)\n label_trained_ax.set_title(\"классификация обученной моделью\")\n label_trained_ax.set_xlabel(\"вероятность, что сигнал от поезда\")\n\n loss_ax.plot(history.history[\"loss\"])\n train_ax_label, = sig_untrained_ax.plot(\n np.linspace(0, len(train_sample)/fs, len(train_sample)),\n 
train_sample,\n \"g\", label=\"сигнал с поездом\")\n no_train_ax_label, = sig_untrained_ax.plot(\n np.linspace(0, len(no_train_sample)/fs, len(no_train_sample)),\n no_train_sample,\n \"r\", label=\"сигнал без поезда\")\n sig_untrained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label, = sig_trained_ax.plot(\n np.linspace(0, len(train_filtered)/fs, len(train_filtered)-1),\n train_filtered[1:],\n \"g\", label=\"сигнал с поездом\")\n no_train_ax_label, = sig_trained_ax.plot(\n np.linspace(0, len(no_train_filtered)/fs, len(no_train_filtered)-1),\n no_train_filtered[1:],\n \"r\", label=\"сигнал без поезда\")\n sig_trained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label = label_untrained_ax.scatter(\n untrained_predicted_labels[labels],\n np.array(range(0, len(labels)))[labels],\n color='green', marker='.', label=\"сигнал с поездом\")\n no_train_ax_label = label_untrained_ax.scatter(\n untrained_predicted_labels[np.invert(labels)],\n np.array(range(0, len(labels)))[np.invert(labels)],\n color='red', marker='.', label=\"сигнал без поезда\")\n label_untrained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label = label_trained_ax.scatter(\n predicted_labels[labels],\n np.ma.array(range(0, len(labels)))[labels],\n color='green', marker='.', label=\"сигнал с поездом\")\n no_train_ax_label = label_trained_ax.scatter(\n predicted_labels[np.invert(labels)],\n np.array(range(0, len(labels)))[np.invert(labels)],\n color='red', marker='.', label=\"сигнал без поезда\")\n label_trained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n fig.tight_layout(w_pad=3, h_pad=2,\n rect=[0.0225, 0.0225, 0.95, 0.95])\n #plt.show()\n plt.savefig(pic_name)\n with open(file_name, \"w\") as f:\n f.write(str(model_obj))\n\n for i in range(0, 20):\n path = \"tmp/%i\" % i\n if not os.path.exists(\"%s/models\" % path):\n os.makedirs(\"%s/models\" % path)\n eval_optimizer(\n path,\n models.DefaultModel,\n tf.keras.optimizers.Adagrad(learning_rate=0.001),\n 150,\n False,\n )" ]
[ "0.7574701", "0.7027126", "0.69701713", "0.6691302", "0.65252584", "0.6491573", "0.6486863", "0.64255583", "0.6424441", "0.6415458", "0.6415458", "0.6378634", "0.63784504", "0.6371093", "0.63059115", "0.62917596", "0.62874264", "0.62415004", "0.62381065", "0.6213669", "0.6209486", "0.6206022", "0.6184173", "0.61689", "0.61479706", "0.61222535", "0.6090236", "0.60844785", "0.6043663", "0.60423636", "0.60380155", "0.60363835", "0.60320354", "0.6029986", "0.60127556", "0.60127556", "0.601252", "0.60020596", "0.60007155", "0.59951764", "0.59868985", "0.5975388", "0.59656405", "0.5945517", "0.5941238", "0.5940823", "0.5935825", "0.5933749", "0.5932869", "0.5923651", "0.5888106", "0.5887817", "0.588138", "0.58744144", "0.5873962", "0.587209", "0.586774", "0.58585393", "0.58499295", "0.5840504", "0.5838777", "0.5821486", "0.58174586", "0.58015937", "0.5793503", "0.57801986", "0.57683355", "0.57623863", "0.57582974", "0.5751769", "0.5750599", "0.57482976", "0.57469624", "0.57426727", "0.5735447", "0.5734059", "0.57208586", "0.57134336", "0.57129216", "0.57121915", "0.57121307", "0.57099986", "0.5706464", "0.57040995", "0.5703962", "0.56900567", "0.5672342", "0.56552213", "0.5653173", "0.5649503", "0.5647113", "0.56408226", "0.56383675", "0.5634109", "0.5634109", "0.5632261", "0.563045", "0.5621649", "0.5618304", "0.56178004" ]
0.6705017
3
change state of the model
def load_checkpoint(model, model_name='model', validation_id=None): path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True) _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_state( self ):", "def __change_state(self, state):\n self.state = state", "def _change_state(self, state):\r\n self.table_entry.state = state\r\n self.creator_admin.save_model(self.request, self.table_entry, None, True)", "def set_state(self):\n self.able = not self.able\n self.save()", "def __setstate__(self, state):\n self.__dict__.update(state)\n for y in ['strains', 'alleles', 'base_cobra_model']:\n for x in getattr(self, y):\n x._model = self\n if not hasattr(self, \"name\"):\n self.name = None", "def set_state(self, state: int):", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def state(self, state):\n self._state = state\n self._fitted = True # if the state is set, we assume the model is now fitted", "def on_state_change(self, new_state):\n self.state = new_state", "def updateModel(self):\n pass", "def update_model(self):\n pass", "def update(self):\n self._state = 23", "def update(self):\n self._state = self._state", "def __setstate__(self, state):\n for i, j in state.items():\n setattr(self, i, j)\n self.describer_model = _load_model(self.name)", "def __setstate__(self, state):\n for i, j in state.items():\n setattr(self, i, j)\n self.describer_model = _load_model(self.name)", "def change_state(self):\n new_state = 0 if self.state.state == 1 else 1\n answer = UsbHost.send_query(self.state.ser, \"SetState\", str(self.state.device_id), new_state)\n if answer in wrong_answers:\n error_message(\"Не удалось сменить состояние\")\n self.statusbar.showMessage(answer_translate[answer])\n else:\n self.statusbar.clearMessage()\n self.state.state = new_state\n if new_state == 1:\n self.set_auto_active()\n if new_state == 0:\n self.set_hand_active()", "def changeState(self, node, name, state):", "def set_state(self,s):\n self.state = s", "def update_model(self):\n pass # TODO: Implement this.", "def set_state(self, new_state):\n if new_state == self.state:\n return\n\n assert new_state in [x[0] for x in self.STATES]\n self.state = new_state\n # FIXME, inquire why the model is saved here\n # (update_fields precludes using this for new models)", "def setState(self, state):\n self.state = state", "def update(self):\r\n\r\n self.target.load_state_dict(self.model.state_dict())\r\n self.target.eval()", "def change_state(self,state):\n if self.__currentState:\n self.__currentState.stop()\n \n try:\n idler=self[state]\n except KeyError:\n raise \"%s is not a state of %s\" % (state,self)\n \n self.__currentState=idler()\n self.__currentState.idle()\n self.__currentState=None", "def _update_model(self, new_state, data):\n if new_state:\n if (brightness := data) is not None:\n self._brightness = brightness\n\n # _brightness is not defined when called from super\n try:\n self._state = self._brightness > 0\n except AttributeError:\n self._state = True\n else:\n self._state = False", "def change_state(self):\n transitions = self.transition_map[self.current_state]\n self.current_state = select_from_probability_dict(random(),transitions)", "def set_state(self, new_state):\n self.state = new_state", "def update_state(self, context):\n pass", "def SetState(self, new_state):\r\n\r\n self.state = new_state", "def set_state(self, state):\n\n self.model = self.model_creator(self.config)\n self.epoch = state[\"epoch\"]\n self.model.set_weights(state[\"weights\"])", "def save(self, *args, **kwargs):\n if self.state: self.state.save()", "def update_to_state(self, game_state):\n pass", "def state(self, state):\n self._state = state", "def state_changed(self, oldstate, newstate, event, 
*args, **kwargs):", "def state_changed(self, oldstate, newstate, event, *args, **kwargs):", "def set_state(self,state):\n self.__state = state", "def __setstate__(self, state):\n self.__dict__.update(state)\n\n # Load and update all functionality related to Keras\n import os\n from art import DATA_PATH\n from keras.models import load_model\n\n full_path = os.path.join(DATA_PATH, state['model_name'])\n model = load_model(str(full_path))\n\n self._model = model\n self._initialize_params(model, state['_use_logits'], state['_input_layer'], state['_output_layer'],\n state['_custom_activation'])", "def set_state(self, state):\n self.state = state", "def update_state(self, new_state):\n self.__state = new_state", "def _set_state(self, state):\n #print(\"** set state from %d to %d\" % (self.state, state))\n self.state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def __setstate__(self, state):\n params, weights = state\n #self.set_params(**params)\n #self.ready()\n self._set_weights(weights)", "def __setstate__(self, state):\n params, weights = state\n #self.set_params(**params)\n #self.ready()\n self._set_weights(weights)", "def __setstate__(self, state):\n self.__dict__.update(state)", "def update_state(self, dstate):\n pass", "def state(self, state: _State) -> None:\n prev_data = self._state.data\n self._state = state.with_data(prev_data)", "def save_model(self, request, obj, form, change):\n obj.revise()", "def set_state(self, value):\n self.state = value", "def updateState(self):\n QtGui.QLineEdit.setText(self, self._state[0])", "def modelChanged(self) -> None:\n ...", "def state(self):\n pass", "def change_state(self, timestamp, state):\n\t\tself.timestamp = timestamp\n\t\tself.state = state", "def set_state(self, state):\n return self.update(current_state=state)", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "def update(self):\n self._is_on = self._is_on", "def test_update_state(self):\n pass", "def __setstate__(self, state):\n\n self.list = state", "def __setstate__(self, state):\n\n self.list = state", "def reset_states(self):\n self.model.reset_states()", "def act(self, state):\n return", "def model_state_update(model, time_t, state_controller, input_f16):\n pass", "def __setstate__(self, state):\n return None", "def set_state(self, state):\n self.history = state", "def change():", "def changeState(self):\n if self._state:\n self._state = False\n else:\n self._state = True\n return self._state", "def reset_model(self):\n raise NotImplementedError", "def reset_model(self):\n raise NotImplementedError", "def reset_model(self):\n raise NotImplementedError", "def reset_to(self, state):\n should_ret = False\n if \"model\" in state:\n self.reset()\n xml = postprocess_model_xml(state[\"model\"])\n self.env.reset_from_xml_string(xml)\n self.env.sim.reset()\n if not self._is_v1:\n # hide teleop visualization after 
restoring from model\n self.env.sim.model.site_rgba[self.env.eef_site_id] = np.array([0., 0., 0., 0.])\n self.env.sim.model.site_rgba[self.env.eef_cylinder_id] = np.array([0., 0., 0., 0.])\n if \"states\" in state:\n self.env.sim.set_state_from_flattened(state[\"states\"])\n self.env.sim.forward()\n should_ret = True\n\n if \"goal\" in state:\n self.set_goal(**state[\"goal\"])\n if should_ret:\n # only return obs if we've done a forward call - otherwise the observations will be garbage\n return self.get_observation()\n return None", "def update(self):\r\n self._state = self._dev.state", "def setState( self, cCtrlName, nState ):\n self.setControlModelProperty( cCtrlName, \"State\", nState )", "def assign_state(self, state):\n raise NotImplementedError()", "def sync_state_changed(self, state):\n return", "def __setstate__(self, state):\r\n\r\n \"\"\"# Support adding a new member not previously defined in the class\r\n if 'new_member' not in state:\r\n self.new_member = \"new value\"\r\n self.__dict__.update(state)\"\"\"\r\n\r\n \"\"\" # Support removing old members not in new version of class\r\n if 'old_member' in state:\r\n # If you want: do something with the old member\r\n del state['old_member']\r\n self.__dict__.update(state) \"\"\"\r\n\r\n del state['_background_image']\r\n\r\n if '_background_image_data' not in state:\r\n print \"Detected old version of saved file!\"\r\n self._background_image_data = QtCore.QByteArray()\r\n\r\n\r\n if isinstance(state['_features'], list):\r\n for feature in state['_features']:\r\n self._append_feature(feature)\r\n del state['_features']\r\n\r\n self.__init__()\r\n self.__dict__.update(state)\r\n\r\n if isinstance(state['_features'], dict):\r\n\r\n for _id, feature in state['_features'].iteritems():\r\n if feature.get_feature_type() == 'Fuse':\r\n self._fuse_tree_item_model.addChild(feature, None)", "def update(self, *args, **kwargs) -> None:\n self.update_state(args[0])\n super().update(*args, **kwargs)", "def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')", "async def refresh_entity_state(self):", "def change_task_state(self, new_state):\n self.task_state = new_state", "def set_state(self, state: int):\n self.state = state", "def setstate(self,name,state):\n if (name not in KFNode.names):\n print ' state name ',name,' not in KNode!'\n self.states[name]=state.copy()\n self.status = name\n return", "def __init__(self):\n self.update_state()", "def save_state(self):\n pass", "def dummy():\n\t\t\tself.edit = True", "def update(self, t):\n self.state.send(t)", "def manualState(self, tfid, state):\n self.trafficLights.get(int(tfid)).setState(state)\n self.trafficLights.get(int(tfid)).updateState()", "def setState(self, state):\n \"\"\" Anticipate needing this for adding lazy loading and support for data sets too large to hold in memory \"\"\"\n \n self.state = state", "def state(self, state: str) -> None:", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.transition = pdict.pop('transition')\n self.steadyStatePb = pdict.pop('steadyStatePb')", "def state(self):\n raise NotImplementedError", "def test_update_state1(self):\n pass" ]
[ "0.7737057", "0.75527465", "0.7345793", "0.7311264", "0.72797084", "0.71178675", "0.70558363", "0.7049051", "0.70343417", "0.701413", "0.6992956", "0.69799733", "0.69711614", "0.6911453", "0.6911453", "0.6906396", "0.6853698", "0.68437946", "0.6841781", "0.67774165", "0.677045", "0.67602533", "0.6751895", "0.67405814", "0.6718926", "0.66871995", "0.66819954", "0.66557306", "0.6648994", "0.66441995", "0.6625621", "0.6614812", "0.65895873", "0.65895873", "0.65881133", "0.65244085", "0.65207845", "0.6512305", "0.6501265", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.6489792", "0.647648", "0.647648", "0.6448488", "0.6436149", "0.64234126", "0.6405199", "0.63961345", "0.636906", "0.63572925", "0.63390857", "0.63354903", "0.63306874", "0.63233715", "0.6317771", "0.6315249", "0.6307607", "0.6307607", "0.6298585", "0.62897134", "0.6278875", "0.6267905", "0.6265865", "0.6250233", "0.62433666", "0.62374556", "0.62374556", "0.62374556", "0.6217218", "0.6209685", "0.6206285", "0.6201095", "0.6199647", "0.61923647", "0.61846906", "0.61826026", "0.61693805", "0.61664826", "0.6163036", "0.61602247", "0.61438864", "0.612326", "0.6113504", "0.61110437", "0.6108549", "0.6108054", "0.6105503", "0.6100193", "0.60810685", "0.6071078" ]
0.0
-1
save checkpoint (optimizer and model)
def save_checkpoint(model, optimizer=None, model_name='model', validation_id=None): path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True) print_debug('Saving checkpoint: ' + path) model = model.module if type(model) is torch.nn.DataParallel else model checkpoint = { 'model_state_dict': model.state_dict() } if optimizer is not None: checkpoint['optimizer_state_dict'] = optimizer.state_dict() torch.save(checkpoint, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model_checkpoint(model, optimizer, global_step, epoch_info, file_name):\n output = {\n \"model\" : model.state_dict(),\n \"optimizer\" : optimizer.state_dict(),\n \"global_step\" : global_step + 1,\n \"epoch_info\" : epoch_info\n }\n torch.save(output, file_name)", "def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)", "def save_checkpoint(self, model, optimizers):\n\n def _save(path, model, optimizers):\n if not os.path.exists(path):\n os.makedirs(path)\n # TODO: put everything on CPU first\n torch.save(model.state_dict(), os.path.join(path, 'model.ckpt'))\n torch.save(tuple([optimizer.opt.state_dict() for optimizer in optimizers]),\n os.path.join(path, 'opt.ckpt'))\n\n if (self.epoch % self._save_iter) == 0:\n # we're at a save iteration\n ckpt_path = os.path.join(self.log_path, 'checkpoints', str(self.epoch))\n _save(ckpt_path, model, optimizers)\n\n if self._best_epoch:\n # overwrite the best model\n ckpt_path = os.path.join(self.log_path, 'checkpoints', 'best')\n _save(ckpt_path, model, optimizers)\n self._best_epoch = False", "def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver.save(self.session, self.path + '/checkpoint/model.ckpt')", "def checkpoint(self):\n save()", "def save_checkpoint(filename, epoch, model, optimizer=None, best_score=0):\n torch.save({\n 'model' : model.state_dict(),\n 'optim' : optimizer.state_dict() if optimizer is not None else None,\n 'epoch' : epoch,\n 'best_score' : best_score\n }, filename)", "def save_checkpoint(self, checkpoint_path='checkpoint.pth'):\n # Move the model back to the cpu so it can be loaded onto machines\n # without gpu's as well.\n self.model.to('cpu')\n\n checkpoint = {\n 'model_architecture': self.model_architecture,\n 'input_size': self.input_size,\n 'output_size': self.output_size,\n 'hidden_layers': self.hidden_layers,\n 'learn_rate': self.learn_rate,\n 'drop_p': self.drop_p,\n 'class_to_idx': self.model.class_to_idx,\n 'current_epoch': self.model.current_epoch,\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'model_state_dict': self.model.state_dict()\n }\n torch.save(checkpoint, checkpoint_path)", "def save_checkpoint(model, is_best, filename='./model/checkpoint.pth.tar'):\n if is_best:\n torch.save(model.state_dict(), filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def save_checkpoint(model, save_path):\n torch.save(model.state_dict(), save_path)", "def save_checkpoint(self, model):\n # print(f\"save model {self.save_model_path}\")\n torch.save(model.state_dict(), self.save_model_path)", "def save(self):\n\n if self.ckpt_manager is not None:\n save_path = self.ckpt_manager.save()\n print(\"Saved checkpoint at: {}\".format(save_path))\n else:\n print(\"There is no checkpoint manager supplied for saving the \"\n \"network weights, optimizer, or other trackables.\")\n print(\"Therefore these will not be saved and the training will \"\n \"start from default values in the future.\")\n print(\"Consider using a checkpoint manager to save the network \"\n \"weights and optimizer.\")", "def save_checkpoint_manual(model: LFADS, path: str):\n model_wts = [v.numpy() for v in model.trainable_variables]\n optim_wts = model.optimizer.get_weights()\n checkpoint = {\"model\": model_wts, \"optimizer\": optim_wts}\n with open(path, \"wb\") as fout:\n 
pickle.dump(checkpoint, fout)", "def save_checkpoint(filename, model, state=None):\n if not state:\n torch.save(model.state_dict(), os.path.join('checkpoints/', filename))\n else:\n _state = {\n 'epoch': state['epoch'],\n 'state_dict': state['state_dict'].state_dict(),\n 'optimizer': state['optimizer'].state_dict()\n }\n\n torch.save(_state, os.path.join('checkpoints/', filename))", "def save_checkpoint(self, name):\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'best_metrics': self.scores\n }\n\n logger.warning(\"Saving model parameters ...\")\n data['model'] = self.encoder.model.state_dict()\n data['classifier'] = self.proj\n data['dico_id2word'] = self.data['dico'].id2word\n data['dico_word2id'] = self.data['dico'].word2id\n data['dico_counts'] = self.data['dico'].counts\n # print(self.encoder.pretrain_params)\n data['params'] = self.encoder.pretrain_params.update({k: v for k, v in self.params.__dict__.items()})\n\n torch.save(data, path)", "def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def save_checkpoint(args,state, is_best, filename=\"checkpoint.pth.tar\"):\n directory = \"runs/%s-net/\" % (args.name)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\n epoch = state['epoch']\n\n filename = directory + filename\n torch.save(state, filename)\n\n if is_best:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_best.pth.tar\")\n\n if epoch==0 or epoch==2:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_epoch_%d.pth.tar\" % epoch )", "def save_checkpoint(model, path):\n\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Basic details\n checkpoint = {\n 'class_to_idx': model.class_to_idx,\n 'idx_to_class': model.idx_to_class,\n 'epochs': model.epochs,\n }\n\n # Extract the final classifier and the state dictionary\n if model_name == 'vgg16':\n # Check to see if model was parallelized\n if multi_gpu:\n checkpoint['classifier'] = model.module.classifier\n checkpoint['state_dict'] = model.module.state_dict()\n else:\n checkpoint['classifier'] = model.classifier\n checkpoint['state_dict'] = model.state_dict()\n\n elif model_name == 'resnet50':\n if multi_gpu:\n checkpoint['fc'] = model.module.fc\n checkpoint['state_dict'] = model.module.state_dict()\n else:\n checkpoint['fc'] = model.fc\n checkpoint['state_dict'] = model.state_dict()\n\n # Add the optimizer\n checkpoint['optimizer'] = model.optimizer\n checkpoint['optimizer_state_dict'] = model.optimizer.state_dict()\n\n # Save the data to the path\n torch.save(checkpoint, path)", "def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' 
+ time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)", "def save_checkpoint(tag, params, model):\r\n os.makedirs(os.path.join(\"saved_models\", params.path), exist_ok=True)\r\n state = {\r\n 'training_id': params.training_id,\r\n 'global_step': model.global_step,\r\n 'model': model.state_dict(),\r\n 'optimizers': [optimizer.state_dict() for optimizer in model.optimizers]\r\n }\r\n fn = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n torch.save(state, fn)", "def save_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n model_dict = {'net_state_dict': self.net.state_dict(),\n 'use_cuda': self.use_cuda}\n\n print(\"Saving model to {}\".format(model_file))\n torch.save(model_dict, model_file)", "def save_checkpoint(state, is_best, epoch, args, filename='checkpoint.pth'):\n if not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n filename = args.save_folder + str(epoch) + '_' + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, args.save_folder + 'model_best.pth')", "def _save_checkpoint(self, epoch, is_best=False):\n arch = type(self.model).__name__\n state = {\n 'arch': arch,\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'config': self.config\n }\n filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))\n torch.save(state, filename)\n self.logger.info(\"Saving checkpoint: {} ...\".format(filename))\n if is_best:\n best_path = str(self.checkpoint_dir / 'model_best.pth')\n torch.save(state, best_path)\n self.logger.info(\"Saving current best: model_best.pth ...\")", "def save_checkpoint(self, fname, save_optimizer=True):\n # -- Set the network to the full MultiHead_Module network to save everything in the class not only the current model -- #\n self.network = self.mh_network\n\n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().save_checkpoint(fname, save_optimizer)\n\n # -- Set the flag in already_trained_on -- #\n if not self.already_trained_on[str(self.fold)]['checkpoint_should_exist']:\n # -- Set the flag to True -- #\n self.already_trained_on[str(self.fold)]['checkpoint_should_exist'] = True\n # -- Add the current head keys for restoring (should be in correct order due to OrderedDict type of heads) -- #\n self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'] = list(self.mh_network.heads.keys())\n # -- Add the current active task for restoring -- #\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'] = self.mh_network.active_task\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def save_model_checkpoint(base_name, model, ep, opt):\n # Save only the model params\n model_name = os.path.join(base_name, \"i3d_ep\"+str(ep)+\"_\"+opt+\".pt\")\n\n torch.save(model.state_dict(), model_name)\n print(\"Model saved to disk... 
: {}\".format(model_name))", "def save_checkpoint(self) -> Dict[str, Union[Dict[str, torch.Tensor], dict]]:\n if isinstance(self.model, nn.DataParallel) or isinstance(self.model, nn.parallel.DistributedDataParallel):\n model = self.model.module.state_dict()\n else:\n model = self.model.state_dict()\n\n checkpoint = {\n \"model_state_dict\": deepcopy(model),\n \"optimizer_state_dict\": deepcopy(self.optimizer.state_dict()),\n }\n return checkpoint", "def save_checkpoint(self, model_path=None):\n # TODO: include new params based on ConfigEnum\n if not os.path.isdir(path_checkpoints_dir):\n os.mkdir(path_checkpoints_dir)\n if model_path is None:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"saved the model at {model_path}\") \n model_state_dict = self.model.state_dict()\n if self.optimizer is not None:\n opt_state_dict = self.optimizer.state_dict()\n else:\n opt_state_dict = None\n if self.scheduler is not None:\n sch_state_dict = self.scheduler.state_dict()\n else:\n sch_state_dict = None\n \n if self.scaler is not None:\n amp_grad_scaler = self.scaler.state_dict()\n else:\n amp_grad_scaler = None\n\n model_dict = {}\n model_dict[\"state_dict\"] = model_state_dict\n model_dict[\"optimizer\"] = opt_state_dict\n model_dict[\"scheduler\"] = sch_state_dict\n model_dict['scaler'] = amp_grad_scaler\n model_dict['image_size'] = self.image_size\n model_dict['device'] = self.device\n model_dict['fp16'] = self.fp16\n model_dict['accumulate_grad_steps'] = self.accumulate_grad_steps\n\n model_dict['experiment_id'] = self.experiment_id\n model_dict['experiment_tag'] = self.experiment_tag\n\n model_dict['seed'] = self.seed\n\n model_dict['train_batch_size'] = self.train_batch_size\n model_dict['valid_batch_size'] = self.valid_batch_size\n model_dict['test_batch_size'] = self.test_batch_size\n model_dict['dataloader_num_workers'] = self.dataloader_num_workers\n model_dict['train_dataloader_shuffle'] = self.train_dataloader_shuffle\n\n model_dict['optimizer_type'] = self.optimizer_type\n model_dict['optimizer_params'] = self.optimizer_params\n\n model_dict['scheduler_type'] = self.scheduler_type\n model_dict['scheduler_params'] = self.scheduler_params\n model_dict['step_scheduler_after'] = self.step_scheduler_after\n model_dict['step_scheduler_metric'] = self.step_scheduler_metric\n\n model_dict['compute_train_loss_after'] = self.compute_train_loss_after\n model_dict['compute_train_metric_after'] = self.compute_train_metric_after\n model_dict['compute_valid_loss_after'] = self.compute_valid_loss_after\n model_dict['compute_valid_metric_after'] = self.compute_valid_metric_after\n\n model_dict['training_stopping_criteria'] = self.training_stopping_criteria\n model_dict['stopping_criteria_params'] = self.stopping_criteria_params\n model_dict['max_epoch'] = self.max_epoch\n model_dict['train_on_all_data'] = self.train_on_all_data\n model_dict['validate_after'] = self.validate_after\n model_dict['validation_steps'] = self.validation_steps\n model_dict['run_lr_range_test'] = self.run_lr_range_test\n model_dict['sleep_in_epochs'] = self.sleep_in_epochs\n model_dict['sleep_time'] = self.sleep_time\n model_dict['checkpoint_epochs'] = self.checkpoint_epochs\n\n model_dict['_best_score'] = self._best_score\n model_dict['_current_score'] = self._current_score\n model_dict['_counter'] = self._counter\n\n model_dict['metrics'] = self.metrics\n model_dict['current_epoch'] = self.current_epoch\n model_dict['current_train_batch'] = self.current_train_batch\n 
model_dict['current_valid_batch'] = self.current_valid_batch\n\n model_dict['num_train_samples'] = self.num_train_samples\n model_dict['num_train_iterations'] = self.num_train_iterations\n model_dict['checkpoint_snapshot'] = self.checkpoint_snapshot \n torch.save(model_dict, model_path)", "def saveCheckpoint(acc, epoch, model, train_hist):\r\n print('Saving..')\r\n state = {\r\n 'model': model,\r\n 'acc': acc,\r\n 'epoch': epoch,\r\n 'rng_state': torch.get_rng_state(),\r\n 'train_hist': train_hist\r\n }\r\n if not os.path.isdir('checkpoint'): # save to checkpoint directory\r\n os.mkdir('checkpoint')\r\n torch.save(state, './checkpoint/ckpt' + '_' + str(epoch+1))", "def save_checkpoint(state, is_best, filename='checkpoint/chpt.tar'):\n if is_best:\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def checkpoint():", "def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)", "def save_checkpoint(self, filename=None):\n filename = os.path.join(self.args.checkpoint_dir, filename)\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou':self.best_MIou\n }\n torch.save(state, filename)", "def save_checkpoint(state, model_name=None):\n \n if not model_name: model_name = f\"model_date_{date_time_str}.pth\"\n torch.save(state, osj(out_path, model_name))", "def checkpoint(self, epoch, losses, path):\n dct = {'epoch': epoch, \n 'losses': losses, \n 'model_state_dict': self.TrajectoryAutoencoder.state_dict()}\n torch.save(dct, path)", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\"%(args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\" % (args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/' % (args.name) + 'model_best.pth.tar')", "def save_checkpoint(state, filename):\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint", "def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))", "def save_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n torch.save(self.model.state_dict(), path)", "def save(self, model, ema_model, optimizer, epoch, step, best_wer,\n is_best=False):\n rank = 0\n if dist.is_initialized():\n dist.barrier()\n rank = dist.get_rank()\n\n if rank != 0:\n return\n\n # Checkpoint already saved\n if not is_best and epoch in self.tracked:\n return\n\n unwrap_ddp = lambda model: getattr(model, 'module', model)\n state = {\n 'epoch': epoch,\n 'step': step,\n 'best_wer': best_wer,\n 'state_dict': unwrap_ddp(model).state_dict(),\n 'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,\n 'optimizer': optimizer.state_dict(),\n 'amp': amp.state_dict() if self.use_amp else None,\n }\n\n if is_best:\n fpath = os.path.join(\n 
self.save_dir, f\"{self.model_name}_best_checkpoint.pt\")\n else:\n fpath = os.path.join(\n self.save_dir, f\"{self.model_name}_epoch{epoch}_checkpoint.pt\")\n\n print_once(f\"Saving {fpath}...\")\n torch.save(state, fpath)\n\n if not is_best:\n # Remove old checkpoints; keep milestones and the last two\n self.tracked[epoch] = fpath\n for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):\n try:\n os.remove(self.tracked[epoch])\n except:\n pass\n del self.tracked[epoch]", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"checkoutpoint/%s/\" % args.name\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'checkoutpoint/%s/' % args.name + 'model_best.pth.tar')", "def save(self):\n\n self.saver.save(self.sess, self.path + '/tensorflow-model', global_step=self.counter.count)", "def save_checkpoint(state, is_best, checkpoint_dir):\n\n if not os.path.exists(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n shutil.copyfile(last_file_path, best_file_path)", "def checkpoint(self, epoch: int):\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )", "def save_checkpoint(checkpoint_dir, model_encoder_noisy_clean, model_encoder_noisy_noise, model_decoder_noisy,\n model_encoder_clean, model_decoder_clean, model_encoder_noise, model_decoder_noise,\n model_classifier, min_eval_loss_melsp_y_dB, min_eval_loss_melsp_y_dB_std,\n iter_idx, min_idx, optimizer, numpy_random_state, torch_random_state, iterations, model_spkidtr=None):\n model_encoder_noisy_clean.cpu()\n model_encoder_noisy_noise.cpu()\n model_decoder_noisy.cpu()\n model_encoder_clean.cpu()\n model_decoder_clean.cpu()\n model_encoder_noise.cpu()\n model_decoder_noise.cpu()\n model_classifier.cpu()\n checkpoint = {\n \"model_encoder_noisy_clean\": model_encoder_noisy_clean.state_dict(),\n \"model_encoder_noisy_noise\": model_encoder_noisy_noise.state_dict(),\n \"model_decoder_noisy\": model_decoder_noisy.state_dict(),\n \"model_encoder_clean\": model_encoder_clean.state_dict(),\n \"model_decoder_clean\": model_decoder_clean.state_dict(),\n \"model_encoder_noise\": model_encoder_noise.state_dict(),\n \"model_decoder_noise\": model_decoder_noise.state_dict(),\n \"model_classifier\": model_classifier.state_dict(),\n \"min_eval_loss_melsp_y_dB\": min_eval_loss_melsp_y_dB,\n \"min_eval_loss_melsp_y_dB_std\": min_eval_loss_melsp_y_dB_std,\n \"iter_idx\": iter_idx,\n \"min_idx\": min_idx,\n \"optimizer\": optimizer.state_dict(),\n \"numpy_random_state\": numpy_random_state,\n \"torch_random_state\": torch_random_state,\n \"iterations\": iterations}\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n torch.save(checkpoint, checkpoint_dir + \"/checkpoint-%d.pkl\" % iterations)\n model_encoder_noisy_clean.cuda()\n model_encoder_noisy_noise.cuda()\n 
model_decoder_noisy.cuda()\n model_encoder_clean.cuda()\n model_decoder_clean.cuda()\n model_encoder_noise.cuda()\n model_decoder_noise.cuda()\n model_classifier.cuda()\n logging.info(\"%d-iter checkpoint created.\" % iterations)", "def save_checkpoint(model, filename, optimizer=None, meta=None):\n if meta is None:\n meta = {}\n elif not isinstance(meta, dict):\n raise TypeError(f'meta must be a dict or None, but got {type(meta)}')\n meta.update(mmcv_version=mmcv.__version__, time=time.asctime())\n\n if is_module_wrapper(model):\n model = model.module\n\n if hasattr(model, 'CLASSES') and model.CLASSES is not None:\n # save class name to the meta\n meta.update(CLASSES=model.CLASSES)\n\n checkpoint = {\n 'meta': meta,\n 'state_dict': weights_to_cpu(get_state_dict(model))\n }\n # save optimizer state dict in the checkpoint\n if isinstance(optimizer, Optimizer):\n checkpoint['optimizer'] = optimizer.state_dict()\n elif isinstance(optimizer, dict):\n checkpoint['optimizer'] = {}\n for name, optim in optimizer.items():\n checkpoint['optimizer'][name] = optim.state_dict()\n\n if filename.startswith('pavi://'):\n try:\n from pavi import modelcloud\n from pavi.exception import NodeNotFoundError\n except ImportError as e:\n raise ImportError(\n 'Please install pavi to load checkpoint from modelcloud.') from e\n model_path = filename[7:]\n root = modelcloud.Folder()\n model_dir, model_name = osp.split(model_path)\n try:\n model = modelcloud.get(model_dir)\n except NodeNotFoundError:\n model = root.create_training_model(model_dir)\n with TemporaryDirectory() as tmp_dir:\n checkpoint_file = osp.join(tmp_dir, model_name)\n with open(checkpoint_file, 'wb') as f:\n torch.save(checkpoint, f)\n f.flush()\n model.create_file(checkpoint_file, name=model_name)\n else:\n mmcv.mkdir_or_exist(osp.dirname(filename))\n # immediately flush buffer\n with open(filename, 'wb') as f:\n torch.save(checkpoint, f)\n f.flush()", "def save_checkpoint(self):\n if not self.save_ckpt:\n return\n\n lookup = None\n is_best = False\n checkpoint = self.create_checkpoint()\n\n # save best only or not?\n if self.save_best_only:\n if self.valid_dataloader:\n for item in [self.valid_metric_meters, self.valid_loss_meters]:\n if self.primary_indicator in item:\n lookup = item\n else:\n for item in [self.train_metric_meters, self.train_loss_meters]:\n if self.primary_indicator in item:\n lookup = item\n if lookup:\n value = lookup[self.primary_indicator].avg\n if self.best_mode == 'min':\n if value < self.best_indicator:\n self.best_indicator = value\n is_best = True\n else:\n if value > self.best_indicator:\n self.best_indicator = value\n is_best = True\n\n # TODO: better naming convention\n if self.valid_dataloader:\n metric_string = '-'.join([\n f'{metric}-[{self.valid_metric_meters[metric].avg:.5f}]'\n for metric in self.valid_metric_meters\n ])\n loss_string = '-'.join([\n f'{loss}-[{self.valid_loss_meters[loss].avg:.5f}]'\n for loss in self.valid_loss_meters\n ])\n else:\n metric_string = '-'.join([\n f'{metric}-[{self.train_metric_meters[metric].avg:.5f}]'\n for metric in self.train_metric_meters\n ])\n loss_string = '-'.join([\n f'{loss}-[{self.train_loss_meters[loss].avg:.5f}]'\n for loss in self.train_loss_meters\n ])\n # TODO: use config for paths\n # make subdir\n folder = Path(self.save_path, str(self.fold_idx))\n folder.mkdir(parents=True, exist_ok=True)\n if not self.save_best_only or (self.save_best_only and is_best):\n torch.save(checkpoint,\n 
f'{folder}/ep-[{self.epoch}]-iter-[{self.iter}]-{loss_string}-{metric_string}.pth')", "def save_checkpoint(state: dict, is_best: bool, filename: str = 'checkpoint.pth.tar', args: Namespace = None):\n directory = f\"runs/{args.name}/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, f'runs/{args.name}/model_best.pth.tar')", "def save_checkpoint(self, name, include_optimizers=True):\n if not self.params.is_master:\n return\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'n_total_iter': self.n_total_iter,\n 'best_metrics': self.best_metrics,\n 'best_stopping_criterion': self.best_stopping_criterion,\n }\n\n for name in self.MODEL_NAMES:\n logger.warning(\"Saving %s parameters ...\" % name)\n data[name] = getattr(self, name).state_dict()\n if include_optimizers:\n for name in self.optimizers.keys():\n logger.warning(\"Saving %s optimizer ...\" % name)\n data['%s_optimizer' % name] = self.optimizers[name].state_dict()\n\n # data['dico_id2word'] = self.data['dico'].id2word\n # data['dico_word2id'] = self.data['dico'].word2id\n # data['dico_counts'] = self.data['dico'].counts\n data['params'] = {k: v for k, v in self.params.__dict__.items()}\n\n torch.save(data, path)", "def save_checkpoint(model, state, is_best, checkpoint):\n state_filepath = os.path.join(checkpoint, 'last.pth.tar')\n model_filepath = os.path.join(checkpoint, 'last_model.pth')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n else:\n print(\"Checkpoint Directory exists! 
\")\n torch.save(state, state_filepath)\n torch.save(model, model_filepath)\n if is_best:\n shutil.copyfile(state_filepath, os.path.join(checkpoint, 'best.pth.tar'))\n shutil.copyfile(model_filepath, os.path.join(checkpoint, 'best_model.pth'))", "def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint", "def save_checkpoint(ckpt_dir, model, optim, scheduler, epoch, global_step):\n states = {\n 'model': model.state_dict(),\n 'optim': optim.state_dict(),\n 'epoch': epoch,\n 'global_step': global_step\n }\n if scheduler is not None:\n states['scheduler'] = scheduler.state_dict()\n ckpt_path = os.path.join(ckpt_dir, '[ep-{:02d}]giter-{}.ckpt'.format(epoch, global_step))\n torch.save(states, ckpt_path)\n\n return ckpt_path", "def save(self, checkpoint_dir, step):\n model_name = \"CNN.model\"\n model_dir = \"%s\" % (\"cnn\")\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n \n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n \n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)", "def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):\n\n checkpoint_path = os.path.join(model_dir, checkpoint_prefix)\n saved_path = checkpoint.save(checkpoint_path)\n logging.info('Saving model as TF checkpoint: %s', saved_path)\n return", "def save_states(self, checkpoint):\n raise NotImplementedError()", "def save_checkpoint(self, filename='checkpoint.pth'):\n torch.save(self.state_dict(), filename)", "def save_checkpoint(model: nn.Module, args: Namespace, path: str):\r\n state = {\r\n 'args': args,\r\n 'state_dict': model.state_dict()\r\n }\r\n torch.save(state, path)", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name in [\"encoder\", \"decoder\"]:\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n if model_name == 'encoder':\n to_save = self.encoder.state_dict()\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.opt.height\n to_save['width'] = self.opt.width\n else:\n to_save = self.decoder.state_dict()\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.optimizer.state_dict(), save_path)", "def _save_model_checkpoints(self, global_step):\n self.netG.save_checkpoint(directory=self.netG_ckpt_dir,\n global_step=global_step,\n optimizer=self.optG)\n\n if self.netD is not None:\n self.netD.save_checkpoint(directory=self.netD_ckpt_dir,\n global_step=global_step,\n optimizer=self.optD)\n\n if self.train_drs:\n self.netD_drs.save_checkpoint(directory=self.netD_drs_ckpt_dir,\n global_step=global_step,\n optimizer=self.optD_drs)", "def write_checkpoint(self, session):\n base_save_path = self.params.cp_save_dir+self.params.model_name+\"_v\"+self.params.version\n full_save_path = self.full_saver.save(session,\n save_path=base_save_path,\n global_step=self.global_step,\n latest_filename=self.params.cp_latest_filename)\n self.logger.log_info(\"Full model saved in file %s\"%full_save_path)\n return base_save_path", "def save_checkpoint(self, epoch: int) -> Path:\n logging.getLogger().disabled = True\n model_state_dict = self.model.module.state_dict() \\\n if isinstance(self.model, torch.nn.DataParallel) else self.model.state_dict()\n checkpoint_file_path = self.config.get_path_to_checkpoint(epoch)\n checkpoint_file_path.parent.mkdir(exist_ok=True, 
parents=True)\n info_to_store = {\n ModelAndInfo.EPOCH_KEY: epoch,\n ModelAndInfo.MODEL_STATE_DICT_KEY: model_state_dict,\n ModelAndInfo.OPTIMIZER_STATE_DICT_KEY: self.optimizer.state_dict()\n }\n if self.config.compute_mean_teacher_model:\n assert self.mean_teacher_model is not None # for mypy, getter has this built in\n mean_teacher_model_state_dict = self.mean_teacher_model.module.state_dict() \\\n if isinstance(self.mean_teacher_model, torch.nn.DataParallel) \\\n else self.mean_teacher_model.state_dict()\n info_to_store[ModelAndInfo.MEAN_TEACHER_STATE_DICT_KEY] = mean_teacher_model_state_dict\n\n torch.save(info_to_store, checkpoint_file_path)\n logging.getLogger().disabled = False\n logging.info(f\"Saved model checkpoint for epoch {epoch} to {checkpoint_file_path}\")\n return checkpoint_file_path", "def save_checkpoint(epoch, outdir, model, mapper, optimizer, criterion,\n filename='checkpoint.OWE.pth.tar'):\n filename = outdir / filename\n logger.info(\"Saving checkpoint to {}.\".format(filename))\n torch.save({'epoch': epoch,\n 'model': model.state_dict(),\n 'mapper': mapper.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, str(filename))\n if max(criterion) == criterion[-1]:\n best_name = str(outdir / 'best_checkpoint.OWE.pth.tar')\n shutil.copyfile(str(filename), best_name)\n logger.info(\"Saved best checkpoint to {}.\".format(best_name))", "def finalise(self):\n self.logger.info(\"Saving final versions of model...\")\n self.save_checkpoint(filename='final.pth.tar')", "def save_model(file_name, ep, model, optimizer):\n\n torch.save({\n 'epoch': ep,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, file_name) \n \n return", "def _save_state(self, saver, session, data, checkpts_path):\n # Save variable state\n if checkpts_path:\n logging.info('Saving cotrain checkpoint at %s.', checkpts_path)\n saver.save(session, checkpts_path, write_meta_graph=False)\n\n # Save dataset state.\n if self.data_dir:\n logging.info('Saving self-labeled dataset backup.')\n data.save_state_to_file(self.data_dir)", "def save_end_model(model, optimizer, logger):\n logger.info('Saving model at the end of training...')\n try:\n torch.save({\n 'epoch': cfg.n_epochs,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, cfg.model_final_path)\n\n except FileNotFoundError as fnf_error:\n logger.error(f'{fnf_error}')\n else:\n logger.info('Saved!')", "def save_checkpoint(self, filename, extra_state):\n if distributed_utils.is_master(self.args): # only save one checkpoint\n utils.save_state(\n filename, self.args, self.get_model(), self.criterion, self.optimizer,\n self.lr_scheduler, self._num_updates, self._optim_history, extra_state,\n )", "def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' 
+ str(iteration) + '.ckpt'))", "def save(self) -> None:\n self.saver.save_model_and_weights(self.model)\n self.saver.save_data_shuffle_indices(\n self.data.eval_shuffler.ds_inds\n )\n self.saver.save_input_scaler(self.data.x.scaler)", "def save(self, path=\"./trained_model.checkpoint\"):\n torch.save({\"state_dict\":self.working_q.state_dict}, path)", "def save(self, sess):\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model')\n if not os.path.exists(self.model.ckpt_dir):\n os.makedirs(self.model.ckpt_dir)\n self.saver.save(sess, ckpt_path, global_step=self.gstep)", "def save_checkpoint(state, is_best, file_path, file_name='checkpoint.pth.tar'):\n\n save_path = file_path + '/' + file_name\n torch.save(state, save_path)\n if is_best:\n shutil.copyfile(save_path, file_path + '/model_best.pth.tar')", "def save_checkpoint(dir, state, is_best, filename='checkpoint.pth.tar'):\n directory = \"%s/\" % (dir)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, '%s/' %\n (dir) + 'model_best.pth.tar')", "def save_checkpoint(state, is_best, filename=\"checkpoint.pth.tar\"):\n # only save from rank 0 process to avoid race condition\n rank = comm.get().get_rank()\n if rank == 0:\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, \"model_best.pth.tar\")", "def save_session(self):\n\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(self.FLAGS.model_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n current_step = tf.train.global_step(self.session, self.global_step)\n path = self.saver.save(self.session, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))", "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n \"Checkpoint directory does not exists. 
Creatding {}\".format(checkpoint_dir))\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n log_info(\"Saving last checkpoint\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(\"Saving best checkpoint\")\n shutil.copyfile(last_file_path, best_file_path)", "def save(self):\r\n # torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))\r\n torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model_INN.pt'))", "def save(self, model_dir, step, epoch, is_best=False):\n if model_dir is None:\n return\n save_checkpoint(self._model, self._optimizer, step, epoch, model_dir,\n keep_every_n=self._keep_every_n, is_best=is_best)", "def save(self, checkpoint_path: str):\r\n raise NotImplementedError", "def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):\n filename = os.path.join(self.experiment_dir, filename)\n torch.save(state, filename)\n if is_best:\n filename_best = os.path.join(self.experiment_dir,'best.pth.tar')\n torch.save(state,filename_best)\n best_pred = state['best_pred']\n with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:\n f.write(str(best_pred))\n if not os.path.exists(os.path.join(self.directory,'best_pred.txt')):\n with open(os.path.join(self.directory,'best_pred.txt'),'w') as f:\n f.write(str(best_pred))\n shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))\n else:\n with open(os.path.join(self.directory,'best_pred.txt'),'r') as f:\n max_iou = float(f.readline())\n if best_pred > max_iou:\n with open(os.path.join(self.directory,'best_pred.txt'),'w') as f:\n f.write(str(best_pred))\n shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n print(\"MODEL NAME = {}\".format(model_name))\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.height\n to_save['width'] = self.width\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)", "def save_checkpoint(self, checkpoint: str) -> str:\n\n # Some model might need to aggregate variables during checkpointing\n # which requires both the chief and workers to participate in the\n # allreduce communication protocol.\n # So we need to call get_state on every remote workers, otherwise\n # it might get stuck\n state_refs = [w.get_state.remote() for w in self.remote_workers]\n\n state = ray.get(state_refs[0])\n\n with open(checkpoint, \"wb\") as f:\n SafePickle.dump(state, f)\n\n return checkpoint", "def save(self):\n\n pattern = '{}_{}_{}ep.pt' if self.checkpoint_filename_pattern is None else self.checkpoint_filename_pattern\n filename = pattern.format('sherlock1', time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n self.monitors['loss_train'].num_epochs)\n full_filename = self.full_path(filename)\n c = {\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'monitors': self.monitors,\n 'parent': self.parent,\n 'args': vars(args) # convert args to dict\n }\n 
torch.save(c, full_filename)\n if not args.tuning and args.delete and self.last_checkpoint is not None:\n os.remove(self.last_checkpoint)\n self.last_checkpoint = full_filename\n return filename", "def save(self):\n try:\n torch.save(self.model.state_dict(), os.path.join(self.save_path, \"save_point.pth\"))\n except:\n print(\"Unable to save the model\")", "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n f\"Checkpoint directory does not exists. Creating {checkpoint_dir}\")\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, f'epoch{state[\"epoch\"]}_checkpoint.pytorch')\n log_info(f\"Saving last checkpoint to '{last_file_path}'\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(f\"Saving best checkpoint to '{best_file_path}'\")\n shutil.copyfile(last_file_path, best_file_path)", "def checkpoint_save(self, epoch, model, label=None, checkpoint=None, path=\"\"):\n\n if label is None:\n label = f\"checkpoint-{epoch}\"\n else:\n label = f\"{label}-checkpoint-{epoch}\"\n\n if checkpoint is None:\n pass\n elif checkpoint == -1:\n Potentials.save(model=model, label=label, path=path)\n elif epoch % checkpoint == 0:\n Potentials.save(model=model, label=label, path=path)", "def save_checkpoint(checkpoint_dir, model_encoder_melsp, model_decoder_melsp, model_encoder_excit, model_decoder_excit,\n model_classifier, model_post, min_eval_loss_melsp_dB, min_eval_loss_melsp_dB_std, min_eval_loss_melsp_cv,\n min_eval_loss_melsp, min_eval_loss_melsp_dB_src_trg, min_eval_loss_melsp_dB_src_trg_std, min_eval_loss_laplace,\n min_eval_loss_laplace_cv, iter_idx, min_idx, optimizer, numpy_random_state, torch_random_state,\n iterations, model_spkidtr=None):\n model_encoder_melsp.cpu()\n model_decoder_melsp.cpu()\n model_encoder_excit.cpu()\n model_decoder_excit.cpu()\n model_classifier.cpu()\n model_post.cpu()\n if model_spkidtr is not None:\n model_spkidtr.cpu()\n checkpoint = {\n \"model_encoder_melsp\": model_encoder_melsp.state_dict(),\n \"model_decoder_melsp\": model_decoder_melsp.state_dict(),\n \"model_encoder_excit\": model_encoder_excit.state_dict(),\n \"model_decoder_excit\": model_decoder_excit.state_dict(),\n \"model_classifier\": model_classifier.state_dict(),\n \"model_spkidtr\": model_spkidtr.state_dict(),\n \"model_post\": model_post.state_dict(),\n \"min_eval_loss_melsp_dB\": min_eval_loss_melsp_dB,\n \"min_eval_loss_melsp_dB_std\": min_eval_loss_melsp_dB_std,\n \"min_eval_loss_melsp_cv\": min_eval_loss_melsp_cv,\n \"min_eval_loss_melsp\": min_eval_loss_melsp,\n \"min_eval_loss_melsp_dB_src_trg\": min_eval_loss_melsp_dB_src_trg,\n \"min_eval_loss_melsp_dB_src_trg_std\": min_eval_loss_melsp_dB_src_trg_std,\n \"min_eval_loss_laplace\": min_eval_loss_laplace,\n \"min_eval_loss_laplace_cv\": min_eval_loss_laplace_cv,\n \"iter_idx\": iter_idx,\n \"min_idx\": min_idx,\n \"optimizer\": optimizer.state_dict(),\n \"numpy_random_state\": numpy_random_state,\n \"torch_random_state\": torch_random_state,\n \"iterations\": iterations}\n else:\n checkpoint = {\n \"model_encoder_melsp\": model_encoder_melsp.state_dict(),\n \"model_decoder_melsp\": model_decoder_melsp.state_dict(),\n \"model_encoder_excit\": model_encoder_excit.state_dict(),\n \"model_decoder_excit\": model_decoder_excit.state_dict(),\n \"model_classifier\": model_classifier.state_dict(),\n 
\"model_post\": model_post.state_dict(),\n \"min_eval_loss_melsp_dB\": min_eval_loss_melsp_dB,\n \"min_eval_loss_melsp_dB_std\": min_eval_loss_melsp_dB_std,\n \"min_eval_loss_melsp_cv\": min_eval_loss_melsp_cv,\n \"min_eval_loss_melsp\": min_eval_loss_melsp,\n \"min_eval_loss_melsp_dB_src_trg\": min_eval_loss_melsp_dB_src_trg,\n \"min_eval_loss_melsp_dB_src_trg_std\": min_eval_loss_melsp_dB_src_trg_std,\n \"min_eval_loss_laplace\": min_eval_loss_laplace,\n \"min_eval_loss_laplace_cv\": min_eval_loss_laplace_cv,\n \"iter_idx\": iter_idx,\n \"min_idx\": min_idx,\n \"optimizer\": optimizer.state_dict(),\n \"numpy_random_state\": numpy_random_state,\n \"torch_random_state\": torch_random_state,\n \"iterations\": iterations}\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n torch.save(checkpoint, checkpoint_dir + \"/checkpoint-%d.pkl\" % iterations)\n model_encoder_melsp.cuda()\n model_decoder_melsp.cuda()\n model_encoder_excit.cuda()\n model_decoder_excit.cuda()\n model_classifier.cuda()\n model_post.cuda()\n if model_spkidtr is not None:\n model_spkidtr.cuda()\n logging.info(\"%d-iter checkpoint created.\" % iterations)", "def save_checkpoint(self, accuracy = None):\n state_dict = {\n 'epoch': self.epoch + 1,\n 'state_dict': self.model.state_dict(),\n 'optim_dict': self.optimizer.state_dict()\n }\n torch.save(state_dict,\n os.path.join(self.checkpoints_path,\n \"last.pth\".format(accuracy)))\n if accuracy is not None and accuracy > self.best_accuracy:\n if self.best_accuracy > 0:\n os.remove(\n os.path.join(\n self.checkpoints_path,\n \"best_{acc:.4f}.pth\".format(acc=self.best_accuracy)\n )\n )\n self.best_accuracy = accuracy\n torch.save(state_dict,\n os.path.join(self.checkpoints_path,\n \"best_{acc:.4f}.pth\".format(acc=accuracy)))\n self.best_accuracy = accuracy", "def save_checkpoint(model, epoch, checkpoint_dir, stats):\n state = {\n \"epoch\": epoch,\n \"state_dict\": model.state_dict(),\n \"stats\": stats,\n }\n\n filename = os.path.join(checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(epoch))\n torch.save(state, filename)", "def save_checkpoint(state, filename='checkpoint.pth.tar'):\n torch.save(state, filename)", "def save(self, sess, save_path=\"./log/model.ckpt\", verbose=True):\n if(verbose): print(\"Saving model in: \" + str(save_path))\n save_path = self.tf_saver.save(sess, save_path)\n if(verbose): print(\"Done!\")", "def save_checkpoint(gen_model, dsc_losses, gen_losses, checkpoint_file):\n\n saved = False\n checkpoint_file = f'{checkpoint_file}.pt'\n\n # TODO:\n # Save a checkpoint of the generator model. 
You can use torch.save().\n # You should decide what logic to use for deciding when to save.\n # If you save, set saved to True.\n # ====== YOUR CODE: ======\n epoch = len(gen_losses)\n\n saved = True\n if len(gen_losses) >= 2:\n if gen_losses[-1] > gen_losses[-2]:\n saved = False\n\n if saved and checkpoint_file is not None:\n saved_state = gen_model\n torch.save(saved_state, checkpoint_file)\n print(f'*** Saved checkpoint {checkpoint_file} '\n f'at epoch {epoch}')\n # ========================\n\n return saved", "def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def save(self, epoch=None, note=None):\n\n checkpoint_encoder = {\n 'type': \"transformer\",\n 'model': self.model.encoder.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_encoder['settings'].telegram:\n del checkpoint_encoder['settings'].telegram\n\n checkpoint_decoder = {\n 'type': \"transformer\",\n 'model': self.model.decoder.state_dict(),\n 'generator': self.model.generator.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_decoder['settings'].telegram:\n del checkpoint_decoder['settings'].telegram\n\n if not note:\n note = \"\"\n\n # make sure a path is specified prior to saving the files.\n if self.opt.save_model:\n ready_to_save = False\n if self.opt.save_mode == \"all\":\n model_name = \"_\" + str(note)\n ready_to_save = True\n else:\n # assumes self.opt.save_mode = \"best\"\n if self.valid_accs[-1] >= max(self.valid_accs):\n model_name = \"\"\n ready_to_save = True\n if self.opt.verbose:\n print(\n ' - [Info] The checkpoint file has been updated.')\n if ready_to_save:\n encoder_name = \"encoder\" + model_name + \".chkpt\"\n decoder_name = \"decoder\" + model_name + \".chkpt\"\n # setup directory to save this at.\n encoder_filepath = os.path.join(\n self.opt.directory, encoder_name)\n decoder_filepath = os.path.join(\n self.opt.directory, decoder_name)\n torch.save(checkpoint_encoder, encoder_filepath)\n torch.save(checkpoint_decoder, decoder_filepath)\n else:\n if not self.save_trip:\n if self.opt.verbose:\n print(\n \" - [Warning]: the model is not specified to save.\")\n self.save_trip = True", "def save_checkpoint(state, is_best, checkpoint):\n filepath = os.path.join(checkpoint, 'last.pth.tar')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! 
Making directory {}\".format(checkpoint))\n        os.mkdir(checkpoint)\n    torch.save(state, filepath)\n    # If this is the best checkpoint, save a copy named 'best'\n    if is_best:\n        shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))", "def backup_session(saver, sess, model_dir, global_t, n_episode=0):\n    if not os.path.exists(model_dir):\n        os.makedirs(model_dir)\n    filename = \"checkpoint-%d\" % (n_episode)\n    saver.save(sess, model_dir + \"/\" + filename, global_step=global_t)\n    return", "def save_model(self):\n        for index, agent in enumerate(self.agents):\n            torch.save(agent.actor_local.state_dict(), 'agent{}_checkpoint_actor.pth'.format(index + 1))\n            torch.save(agent.critic_local.state_dict(), 'agent{}_checkpoint_critic.pth'.format(index + 1))", "def save_checkpoint(self, path: str, **kwargs):\n        if self.distributed:\n            encoder = self.net_q.module.encoder\n            head = self.net_q.module.head\n        else:\n            encoder = self.net_q.encoder\n            head = self.net_q.head\n\n        ckpt = {\n            'encoder': encoder.state_dict(),\n            'head': head.state_dict(),\n            'net_ps': self.net_ps.state_dict(),\n            'net_k': self.net_k.state_dict(),\n            'queue': self.queue.state_dict(),\n            'optimizer': self.optimizer.state_dict(),\n            'scheduler': self.scheduler.state_dict(),\n        }\n        if kwargs:\n            ckpt.update(kwargs)\n        torch.save(ckpt, path)", "def save(self, epoch: int, path: str = 'model.pt'):\n        state_dict = {\n            'model_state_dict': self.state_dict(),\n            'epoch': epoch,\n            'ac_optim_dict': self.actor_optimizer.state_dict(),\n            'critic_optim_dict': self.critic_optimizer.state_dict()\n        }\n\n        torch.save(state_dict, path)", "def save_model(self, epoch):\n        ckpt_path = os.path.join(self.config.save_path, f'{epoch}.pkl')\n        print(f'Save parameters to {ckpt_path}')\n        torch.save(self.model.state_dict(), ckpt_path)", "def save_model(self, is_best, state, epoch):\n        path = os.path.join(self.logpath_models, 'model-%d.pth.tar' % epoch)\n        torch.save(state, path)\n        if is_best:\n            shutil.copyfile(path, path + 'model_best.pth.tar')" ]
[ "0.81574863", "0.81427485", "0.7992224", "0.7981823", "0.79559743", "0.7939416", "0.78992295", "0.7896152", "0.7881963", "0.78608304", "0.78405166", "0.77503026", "0.77258694", "0.77220285", "0.7713433", "0.76902556", "0.76812625", "0.76755613", "0.7660106", "0.76497287", "0.76352847", "0.7628766", "0.7628042", "0.76266044", "0.762098", "0.7620327", "0.7600577", "0.7597789", "0.75923413", "0.7591946", "0.7589424", "0.7583478", "0.7577823", "0.75542825", "0.75494915", "0.7547007", "0.75277597", "0.7503991", "0.7502543", "0.7483385", "0.74807423", "0.74713784", "0.74577034", "0.74550563", "0.74373305", "0.74290085", "0.74259585", "0.7420954", "0.74130446", "0.7412799", "0.7409677", "0.7389624", "0.7379212", "0.7377766", "0.7377408", "0.73747164", "0.736369", "0.73546755", "0.7353205", "0.7350186", "0.7346305", "0.7345843", "0.7331717", "0.7330485", "0.7328168", "0.7327767", "0.73256236", "0.73159546", "0.73137635", "0.7302488", "0.7302146", "0.72974324", "0.727342", "0.7268462", "0.72634923", "0.72505575", "0.72491926", "0.7247396", "0.7240464", "0.7240168", "0.7230167", "0.7215586", "0.7210122", "0.72083855", "0.72052586", "0.7189883", "0.7187267", "0.71758354", "0.71499795", "0.71422696", "0.7129116", "0.7128499", "0.7121108", "0.7102532", "0.7098161", "0.70943797", "0.7093883", "0.70869523", "0.7082612", "0.7081514" ]
0.7845735
10
Thank you stack overflow
def parseNumList(input):
    m = re.match(r'(\d+)(?:-(\d+))?(?:-(\d+))?$', input)
    # ^ (or use .split('-'). anyway you like.)
    if not m:
        raise ArgumentTypeError("'" + input + "' is not a range of number. Expected forms like '1-5' or '2' or '10-15-2'.")
    start = int(m.group(1))
    end = int(m.group(2))
    if m.group(3):
        increment = int(m.group(3))
    else:
        increment = 1
    return list(range(start, end+1, increment))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exo2():", "def substantiate():", "def sth():", "def degibber(self):", "def cx():", "def mezclar_bolsa(self):", "def regular(self):", "def reckon(self):", "def result(self):", "def result(self):", "def falcon():", "def healthcare():", "def exercise_b2_106():\r\n pass", "def exercise_b2_107():\r\n pass", "def support(self):", "def g():", "def call(self):", "def think(self):\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_113():\r\n pass", "def exercise_b2_52():\r\n pass", "def think(s):", "def pulp_smash():", "def _regr_basic():", "def exercise_b2_98():\r\n pass", "def problem_298():\n pass", "def transact(self):", "def transact(self):", "def simple():", "def simple():", "def exercise_b2_70():\r\n pass", "def exercise_b2_82():\r\n pass", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def use(self):", "def note():", "def exercise_b2_27():\r\n pass", "def CL(self):", "def firstFunction(self):", "def render(self):", "def process(self):", "def process(self):", "def process(self):", "def apply(self):", "def code():", "def apply(self) -> None:", "def apply(self) -> None:", "def exercise_b2_56():\r\n pass", "def exercise_b2_26():\r\n pass", "def info(self):", "def info(self):", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def sender(self) -> str:", "def c(self):\n pass", "def c(self):\n pass", "def DM(self):", "def exercise_b2_39():\r\n pass", "def smarter():\r\n pass", "def __call__(self):\n\t\treturn", "def MINET(self):", "def __call__(self) -> None:", "def exercise_b2_43():\r\n pass", "def exercise_b2_95():\r\n pass", "def display_message():", "def one(self):", "def generate(self):", "def info(self) -> int:", "def explain(self):", "def compute_debug(self):", "def question_7():\n return None", "def question_11():\n return None", "def main(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def question_new_translate():", "def task4_1(self):\n\n pass", "def task4(self):\n\n pass", "def realsense():\n pass", "def horde_message(self, message):", "def pick_up(self):", "def fn():", "def question_4():\n return None", "def exercise_b2_93():\r\n pass", "def b(self):\n pass", "def b(self):\n pass" ]
[ "0.64075494", "0.6128703", "0.60941494", "0.60059685", "0.592511", "0.5874032", "0.5867482", "0.58433014", "0.58196574", "0.58196574", "0.56822956", "0.5674214", "0.5626368", "0.56027365", "0.5581712", "0.5579045", "0.55690277", "0.5562525", "0.55460745", "0.5500938", "0.5495509", "0.547055", "0.5444945", "0.5420381", "0.5414007", "0.54126316", "0.54068494", "0.54013467", "0.54013467", "0.5398172", "0.5398172", "0.53952587", "0.53889716", "0.5386505", "0.5386505", "0.5386505", "0.5386505", "0.5386505", "0.536501", "0.5361194", "0.5355513", "0.5351463", "0.53491855", "0.53489876", "0.5342912", "0.5342912", "0.5342912", "0.5340769", "0.5321971", "0.5307811", "0.5307811", "0.5305509", "0.53028053", "0.5295419", "0.5295419", "0.52915657", "0.52915657", "0.52915657", "0.52915657", "0.52915657", "0.52906924", "0.5288177", "0.5288177", "0.5280664", "0.52777797", "0.52757704", "0.52666247", "0.52632606", "0.5261917", "0.52498114", "0.5247268", "0.52374154", "0.52261925", "0.5221458", "0.52180094", "0.5211373", "0.5203115", "0.519865", "0.5197389", "0.5195923", "0.51950485", "0.51950485", "0.51950485", "0.51950485", "0.51950485", "0.51950485", "0.51950485", "0.51950485", "0.51950485", "0.51950485", "0.5195045", "0.5194032", "0.518991", "0.51879174", "0.51873803", "0.51794297", "0.51779306", "0.5172686", "0.5159465", "0.51547354", "0.51547354" ]
0.0
-1
Build a hierarchy of levels for Sunburst or Treemap charts. Levels are given starting from the bottom to the top of the hierarchy, i.e. the last level corresponds to the root.
def build_hierarchical_dataframe(df, levels, value_column, color_columns=None):
    df_all_trees = pd.DataFrame(columns=['id', 'parent', 'value', 'color'])
    for i, level in enumerate(levels):
        df_tree = pd.DataFrame(columns=['id', 'parent', 'value', 'color'])
        dfg = df.groupby(levels[i:]).sum()
        dfg = dfg.reset_index()
        df_tree['id'] = dfg[level].copy()
        if i < len(levels) - 1:
            df_tree['parent'] = dfg[levels[i+1]].copy()
        else:
            df_tree['parent'] = 'total'
        df_tree['value'] = dfg[value_column]
        df_tree['color'] = dfg[color_columns[0]] / dfg[color_columns[1]]
        df_all_trees = df_all_trees.append(df_tree, ignore_index=True)
    total = pd.Series(dict(id='total', parent='',
                           value=df[value_column].sum(),
                           color=df[color_columns[0]].sum() / df[color_columns[1]].sum()))
    df_all_trees = df_all_trees.append(total, ignore_index=True)
    return df_all_trees
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_levels(self):\r\n level = (self.root,)\r\n while level:\r\n yield level\r\n level = tuple(child for node in level for child in node.children)", "def represent_tree_levels(self, levels):\r\n prev_node_end = 0 \r\n level_string = []\r\n for level in levels:\r\n prev_node_end = 0 \r\n level_string = []\r\n for node in level: \r\n node_to_str = str(node.keys)\r\n space_between_nodes = node.str_pos - prev_node_end \r\n level_string.extend((\" \"*space_between_nodes, node_to_str))\r\n prev_node_end = node.str_pos + len(node_to_str)\r\n\r\n yield \"\".join(level_string)", "def createLevelMap(self):\n\t\tfor a in self.hierarchy.iterkeys():\n\t\t\tself.lvl = 0\n\t\t\tself.calcLevel(a)\n\t\t\tif self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n\t\t\tself.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def createLevelMap(self):\n for a in self.hierarchy.iterkeys():\n self.lvl = 0\n self.calcLevel(a)\n if self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n self.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def buildTree(data, level):\n \n node = maxIG(data)\n subsets = splitBy(data, node[0])\n header = [\"Outlook\", \"Temp\", \"Humidity\", \"Wind\", \"Play\"]\n \n if node[1] == 0:\n print(\"\\t\" * level, level, getColumn(data, node[0])[0], \":\", getColumn(data, -1)[0]) \n elif level < 4:\n print(\"\\t\" * level, level, getColumn(data, level - 1)[0], \"->\", header[node[0]]) \n rec = [buildTree(subset, level + 1) for subset in subsets]\n else:\n print(\"\\t\" * level, level, getColumn(data, level - 1)[0], \":\", getColumn(data, -1))", "def represent_tree_branches(self, levels):\r\n for level in levels[:-1]:\r\n branch = []\r\n prev_child_mid = 0 \r\n for node in level:\r\n curr_child_mid = node.children[0].str_pos + len(str(node.children[0].keys))//2\r\n space_between_children = curr_child_mid - prev_child_mid \r\n branch.extend((\" \"*space_between_children, \"|\"))\r\n prev_child_mid = curr_child_mid + 1\r\n for child in node.children[1:]:\r\n curr_child_mid = child.str_pos + len(str(child.keys))//2\r\n space_between_children = curr_child_mid - prev_child_mid \r\n branch.extend((\"-\"*space_between_children, \"|\"))\r\n prev_child_mid = curr_child_mid + 1\r\n \r\n branch = \"\".join(branch)\r\n branch = \"\".join((branch, \"\\n\", branch.replace(\"-\", \" \"), \"\\n\\n\"))\r\n yield branch\r\n\r\n yield \"\"", "def build_hierarchical_dataframe(df, levels, value_column):\n df_all_trees = pd.DataFrame(columns=['id', 'parent', 'value'])\n for i, level in enumerate(levels):\n df_tree = pd.DataFrame(columns=['id', 'parent', 'value'])\n dfg = df.groupby(levels[i:]).sum()\n dfg = dfg.reset_index()\n df_tree['id'] = dfg[level].copy()\n if i < len(levels) - 1:\n df_tree['parent'] = dfg[levels[i+1]].copy()\n else:\n df_tree['parent'] = 'All'\n df_tree['value'] = dfg[value_column]\n df_all_trees = df_all_trees.append(df_tree, ignore_index=True)\n total = pd.Series(dict(id='All', parent='',\n value=df[value_column].sum()\n ))\n df_all_trees = df_all_trees.append(total, ignore_index=True)\n\n df_all_trees[\"color\"] = [1 if df_all_trees.loc[i,\"id\"].startswith(\"White\")\n else 0.5 if df_all_trees.loc[i,\"id\"].startswith(\"All\")\n else 0 for i in range(len(df_all_trees))]\n\n df_all_trees['value'] = df_all_trees['value'].astype('int64')\n df_all_trees[\"percentage\"] = round(df_all_trees['value'] / df_all_trees[df_all_trees['id'] == 'All']['value'].tolist()[0] * 100, 2)\n df_all_trees\n\n df_all_trees = 
df_all_trees[df_all_trees[\"id\"]!= df_all_trees[\"parent\"]]\n\n return df_all_trees", "def _create_hierarchies(metadata, levels, template):\n\n # Convert levels do an ordered dictionary for access by name\n levels = object_dict(levels)\n hierarchies = []\n\n # Construct hierarchies and assign actual level objects\n for md in metadata:\n if isinstance(md, compat.string_type):\n if not template:\n raise ModelError(\"Can not specify just a hierarchy name \"\n \"({}) if there is no template\".format(md))\n hier = template.hierarchy(md)\n else:\n md = dict(md)\n level_names = md.pop(\"levels\")\n hier_levels = [levels[level] for level in level_names]\n hier = Hierarchy(levels=hier_levels, **md)\n\n hierarchies.append(hier)\n\n return hierarchies", "def build_hierarchical_dataframe(df, levels, value_column, color_columns=None):\n df_all_trees = pd.DataFrame(columns=['id', 'parent', 'value', 'color'])\n for i, level in enumerate(levels):\n df_tree = pd.DataFrame(columns=['id', 'parent', 'value', 'color'])\n dfg = df.groupby(levels[i:]).sum()\n dfg = dfg.reset_index()\n df_tree['id'] = dfg[level].copy()\n if i < len(levels) - 1:\n df_tree['parent'] = dfg[levels[i+1]].copy()\n else:\n df_tree['parent'] = 'total'\n df_tree['value'] = dfg[value_column]\n df_tree['color'] = dfg[color_columns[0]] / dfg[color_columns[1]]\n df_all_trees = df_all_trees.append(df_tree, ignore_index=True)\n total = pd.Series(dict(id='total', parent='',\n value=df[value_column].sum(),\n color=df[color_columns[0]].sum() / df[color_columns[1]].sum()))\n df_all_trees = df_all_trees.append(total, ignore_index=True)\n return df_all_trees", "def build_tree(self):\n intervals = self.intervals\n\n # sort all endpoints and make intervals for leaf nodes\n endpoints = []\n for interval in intervals:\n endpoints.append(interval.left_endpoint)\n endpoints.append(interval.right_endpoint)\n endpoints.append(float('inf'))\n endpoints.append(float('-inf'))\n\n endpoints.sort()\n unique_endpoints = []\n for i, ep in enumerate(endpoints):\n if i + 1 < len(endpoints) and ep == endpoints[i + 1]:\n continue\n else:\n unique_endpoints.append(ep)\n\n # append tuples for making intervals:\n # (left_endpoint, right_endpoint, l_closed, r_closed)\n # if left_enpoint == right_endpoint: it represents a point\n endpoints = unique_endpoints\n elements = []\n for i, ep in enumerate(endpoints):\n if i == 0:\n prev = ep\n continue\n elif i < len(endpoints) - 1:\n elements.append((prev, ep, False, False))\n elements.append((ep, ep, True, True))\n prev = ep\n else: # i == len(endpoints)-1\n elements.append((prev, ep, False, False))\n\n num_leaves = len(elements)\n\n max_depth = int(math.log(num_leaves) / math.log(2)) + 1\n num_last_leaves = 2 * (num_leaves - 2**(max_depth - 1))\n\n # build tree from bottom to up\n\n # make a queue for each depth\n q = []\n for i, elem in enumerate(elements):\n if i < num_last_leaves:\n if i % 2 == 0:\n prev = elem\n else:\n left_node = TreeNode(*prev)\n right_node = TreeNode(*elem)\n node = TreeNode(prev[0], elem[1], prev[2], elem[3])\n node.left = left_node\n node.right = right_node\n q.append(node)\n else:\n node = TreeNode(*elem)\n q.append(node)\n\n # while depth > 0\n while len(q) > 1:\n tmp_q = []\n for i, node in enumerate(q):\n if i % 2 == 0:\n prev = node\n else:\n left_ep = prev.left_endpoint\n right_ep = node.right_endpoint\n l_closed = prev.left_closed\n r_closed = node.right_closed\n new_node = TreeNode(left_ep, right_ep, l_closed, r_closed)\n new_node.left = prev\n new_node.right = node\n 
tmp_q.append(new_node)\n q = tmp_q\n\n self.root = q[0]\n\n # add canonical subsets\n for interval in intervals:\n self._append_subset(self.root, interval)\n\n return self.root", "def gen_level(\n root_path,\n floor_file = 'floor.lines',\n walls_file = 'walls.lines',\n windows_file = 'windows.lines',\n doors_file = 'doors.lines',\n handrails_file = 'handrails.lines',\n stairs_file = 'stairs.lines'):\n \n def internal(\n floor_thickness = .3,\n walls_thickness = .2, \n walls_height = 3, \n windows_fn = default_window(),\n doors_fn = default_door(),\n doors_height = 2.3,\n handrails_height = 1.2,\n handrails_thickness = .1):\n \n floor = gen_floor(root_path + floor_file, floor_thickness)\n \n walls = w8.gen_walls(\n root_path + walls_file, \n walls_thickness, \n walls_height + floor_thickness, \n external = False)\n \n windows = gen_windows(\n root_path + windows_file, \n windows_fn,\n walls_height + floor_thickness)\n \n doors, doorways = gen_doors(\n root_path + doors_file, \n doors_fn, \n doors_height)\n \n handrails = w8.gen_walls(\n root_path + handrails_file, \n handrails_thickness, \n handrails_height + floor_thickness,\n external = False)\n \n stair_foot = gen_stairs_foot(root_path + stairs_file)\n walls = DIFFERENCE([walls, T(3)(floor_thickness)(doorways)])\n \n return walls, windows, doors, handrails, floor, stair_foot\n \n return internal", "def intialize_hierarchy_paths(self):\n\n leaf_nodes = [node[0] for node in self.tree.out_degree if node[1] == 0]\n paths = [self.tree_utils.determine_path_to_root([node]) for node in leaf_nodes]\n\n # Normalize paths per level in hierarchy - currently the nodes are of increasing number throughout the tree.\n normalized_paths = [self.tree_utils.normalize_path_from_root_per_level(path) for path in paths]\n\n normalized_encoder = {'Root': {'original_key': 0, 'derived_key': 0}}\n normalized_decoder = { 0: {'original_key': 0, 'value': 'Root'}}\n decoder = dict(self.tree.nodes(data=\"name\"))\n encoder = dict([(value, key) for key, value in decoder.items()])\n\n #initiaize encoders\n for path, normalized_path in zip(paths, normalized_paths):\n key = path[-1]\n derived_key = normalized_path[-1]\n if key in leaf_nodes:\n normalized_encoder[decoder[key]] = {'original_key': key, 'derived_key': derived_key}\n normalized_decoder[derived_key] = {'original_key': key, 'value': decoder[key]}\n\n oov_path = [[0, 0, 0]]\n normalized_paths = oov_path + normalized_paths\n\n #Align length of paths if necessary\n longest_path = max([len(path) for path in normalized_paths])\n\n # Sort paths ascending\n sorted_normalized_paths = []\n for i in range(len(normalized_paths)):\n found_path = normalized_paths[0]\n for path in normalized_paths:\n for found_node, node in zip(found_path,path):\n if found_node > node:\n found_path = path\n break\n\n if not (found_path is None):\n sorted_normalized_paths.append(found_path)\n normalized_paths.remove(found_path)\n\n return normalized_encoder, normalized_decoder, sorted_normalized_paths", "def getLevels():", "def levels_for_depth(self, depth, drilldown=False):\n\n depth = depth or 0\n extend = 1 if drilldown else 0\n\n if depth + extend > len(self.levels):\n raise HierarchyError(\"Depth %d is longer than hierarchy \"\n \"levels %s (drilldown: %s)\" %\n (depth, self._levels, drilldown))\n\n return self.levels[0:depth + extend]", "def create_hierarchy(self):\n\t\tif self.level is not None:\n\t\t\treturn\n\t\t\n\t\tself.size = 0\n\t\tsubtype = self.subtype.type\n\t\tif subtype.level is None:\n\t\t\tif self.subtype.size == 
0:\n\t\t\t\traise ParserException(\"Loop in the definition of '%s' and '%s' detected!\" % (self.name, self.subtype.name))\n\t\t\tsubtype.create_hierarchy()\n\t\t\n\t\tself.level = subtype.level + 1\n\t\tself.size = subtype.size", "def get_subdiagrams_grouped_by_level(self):\n subds = []\n\n def get_subds_gbl_rec(node, level):\n \"\"\"\n The recursive call\n \"\"\"\n try:\n subds[level] = subds[level].union({node})\n except IndexError:\n subds.append({node})\n if not isinstance(node, Leaf):\n for child in node.child_nodes:\n get_subds_gbl_rec(node.child_nodes[child][0], level+1)\n\n get_subds_gbl_rec(self, 0)\n return subds", "def __fill_levels_dict(parent=self.root, height=1):\n if not parent.children:\n return # we've already added the parent to the appropriate level\n elif height not in levels_dict:\n levels_dict[height] = []\n for child in parent.children:\n levels_dict[height].append(child.value) # add to the levels dict\n __fill_levels_dict(parent=child, height=height+1) # recursively traverse", "def levels(self):\n raise NotImplementedError(\"Subclasses sohuld implement levels\")", "def get_levels(self) -> List[List[RTreeNode[T]]]:\n levels: List[List[RTreeNode[T]]] = []\n fn = partial(_add_node_to_level, levels)\n # noinspection PyTypeChecker\n self.traverse_level_order(fn)\n return levels", "def levelorder(root):\n h = height(root)\n for i in range(1, h + 1):\n print_level(root, i)", "def create_Treeby_level(root, levelor, i, n):\n if i < n:\n temp = BinaryTreeNode(levelor[i])\n root = temp\n\n root.left = create_Treeby_level(root.left, levelor, 2 * i + 1, n)\n root.right = create_Treeby_level(root.right, levelor, 2 * i + 2, n)\n return root", "def show_hierarchy_chart(self):\n\n chart_type_index = self.ui.comboBox_sunburst_charts.currentIndex()\n if chart_type_index < 1:\n return\n self.get_selected_categories_and_codes()\n self.helper_for_matching_category_and_code_name()\n if chart_type_index == 1: # Code frequency sunburst\n self.hierarchy_code_frequency(\"sunburst\")\n if chart_type_index == 2: # Code frequency treemap\n self.hierarchy_code_frequency(\"treemap\")\n if chart_type_index == 3: # Code by characters sunburst\n self.hierarchy_code_volume_by_characters(\"sunburst\")\n if chart_type_index == 4: # Code by characters treemap\n self.hierarchy_code_volume_by_characters(\"treemap\")\n if chart_type_index == 5: # Code by image area sunburst\n self.hierarchy_code_volume_by_area(\"sunburst\")\n if chart_type_index == 6: # Code by image area treemap\n self.hierarchy_code_volume_by_area(\"treemap\")\n if chart_type_index == 7: # Code by A/V sunburst\n self.hierarchy_code_volume_by_segments(\"sunburst\")\n if chart_type_index == 8: # Code by A/V treemap\n self.hierarchy_code_volume_by_segments(\"treemap\")\n self.ui.comboBox_sunburst_charts.setCurrentIndex(0)", "def init(cls, levels: List[str]) -> List[Level]:\n return [cls(lvl, val) for val, lvl in enumerate(levels)]", "def create_hierarchy(self):\n\t\tpass", "def build_switches(self , level, last_level , parent_sws = [] , total_sw_count = 0):\n if level == last_level : return parent_sws\n sws = []\n # cantidad de switches en este nivel\n sw_count = 2**level\n lower_bound = total_sw_count\n upper_bound = lower_bound + sw_count\n for i in range(lower_bound , upper_bound):\n sw_id = i + 1\n # creo un switch\n sw = self.addSwitch('s%s' % sw_id)\n if level == 0 : self.root_sw = sw\n sws.append(sw)\n # conecto el nuevo switch con todos los switches padre\n for parent_sw in parent_sws:\n self.addLink(sw, parent_sw)\n # los switches 
creados en este nivel seran los padres del nivel siguiente\n return self.build_switches(level + 1 , last_level , sws , total_sw_count + sw_count)", "def get_levels(tree, level=0):\n if type(tree) == list:\n return [level]+get_levels(tree[0], level+1)+get_levels(tree[1], level+1)\n elif type(tree) == tuple:\n return [level, level]+get_levels(tree[1], level+1)\n else:\n return [level]", "def __fill_consecutive_tree_levels(parent=self.root):\n for child in parent.children:\n lst.append(child.value)\n __fill_consecutive_tree_levels(parent=child) # call recursively", "def getHierarchies():", "def getHierarchies():", "def to_tree(self, level_axes, leaf_axes):\n def _to_tree(xa, ans):\n if len(ans) != len(leaf_axes):\n return OrderedDict((dv, _to_tree(suba, ans[1:]))\n for dv, suba in xa.split(ans[0]).iteritems())\n else:\n return xa\n\n xarray = self.reorient(level_axes + leaf_axes)\n return _to_tree(xarray, xarray.axes_names)", "def generate_hierarchy(self,descr):\n # assert the existence of all the keys we need to set up at least on level\n assert 'problem_class' in descr\n assert 'problem_params' in descr\n assert 'dtype_u' in descr\n assert 'dtype_f' in descr\n assert 'sweeper_class' in descr\n assert 'level_params' in descr\n\n # convert problem-dependent parameters consisting of dictionary of lists to a list of dictionaries with only a\n # single entry per key, one dict per level\n pparams_list = self.__dict_to_list(descr['problem_params'])\n # put this newly generated list into the description dictionary (copy to avoid changing the original one)\n descr_new = cp.deepcopy(descr)\n descr_new['problem_params'] = pparams_list\n # generate list of dictionaries out of the description\n descr_list = self.__dict_to_list(descr_new)\n\n # sanity check: is there a transfer class? is there one even if only a single level is specified?\n if len(descr_list) > 1:\n assert 'transfer_class' in descr_new\n assert 'transfer_params' in descr_new\n elif 'transfer_class' in descr_new:\n print('WARNING: you have specified transfer classes, but only a single level...')\n\n # generate levels, register and connect if needed\n for l in range(len(descr_list)):\n\n # check if we have a hook on this list. 
if not, use default class.\n if 'hook_class' in descr_list[l]:\n hook = descr_list[l]['hook_class']\n else:\n hook = hookclass.hooks\n\n if 'sweeper_params' in descr_list[l]:\n swparams = descr_list[l]['sweeper_params']\n else:\n swparams = {}\n\n if not 'collocation_class' in swparams:\n assert 'collocation_class' in descr_list[l]\n swparams['collocation_class'] = descr_list[l]['collocation_class']\n\n if not 'num_nodes' in swparams:\n assert 'num_nodes' in descr_list[l]\n swparams['num_nodes'] = descr_list[l]['num_nodes']\n\n L = levclass.level(problem_class = descr_list[l]['problem_class'],\n problem_params = descr_list[l]['problem_params'],\n dtype_u = descr_list[l]['dtype_u'],\n dtype_f = descr_list[l]['dtype_f'],\n sweeper_class = descr_list[l]['sweeper_class'],\n sweeper_params = swparams,\n level_params = descr_list[l]['level_params'],\n hook_class = hook,\n id = 'L'+str(l))\n\n self.register_level(L)\n\n if l > 0:\n self.connect_levels(transfer_class = descr_list[l]['transfer_class'],\n transfer_params = descr_list[l]['transfer_params'],\n fine_level = self.levels[l-1],\n coarse_level = self.levels[l])", "def __init__(self, name, levels, label=None, info=None, description=None):\n\n super(Hierarchy, self).__init__(name, label, description, info)\n\n if not levels:\n raise ModelInconsistencyError(\"Hierarchy level list should \"\n \"not be empty (in %s)\" % self.name)\n\n if any(isinstance(level, compat.string_type) for level in levels):\n raise ModelInconsistencyError(\"Levels should not be provided as \"\n \"strings to Hierarchy.\")\n\n self._levels = object_dict(levels)", "def Levels(self):\n raise NotImplementedError(\"Subclass must implement abstract method\")", "def levels(self):\n return np.array(self._levels()).T", "def level_sets(self):\n in_degrees = self.in_degree(labels=True)\n level = [x for x in in_degrees if in_degrees[x]==0]\n Levels = []\n while len(level) != 0:\n Levels.append(level)\n new_level = []\n for x in level:\n for y in self.neighbors_out(x):\n in_degrees[y] -= 1\n if in_degrees[y] == 0:\n new_level.append(y)\n level = new_level\n return Levels", "def level_deserialize(self, data):\n if not data: return None\n data = data.split(',')\n\n def make_node(i):\n if i >= len(data): return None\n if data[i] == 'None': return None\n return TreeNode(int(data[i]))\n\n root = make_node(0)\n q = [root]\n i = 1\n while q:\n new_q = []\n for node in q:\n left_node, i = make_node(i), i + 1\n new_q.append(left_node)\n if node: node.left = left_node\n\n right_node, i = make_node(i), i + 1\n new_q.append(right_node)\n if node: node.right = right_node\n\n if i >= len(data): break\n q = new_q\n return root", "def flatten(self):\r\n df = pd.DataFrame([{'level_0': self.root.id}])\r\n for lvl in range(1, self.max_depth()+1):\r\n loc_pairs = [(l.parent.id, l.id) for l in self.level_n_descendants(lvl)]\r\n loc_pairs = pd.DataFrame(loc_pairs)\r\n loc_pairs.rename(columns={\r\n 0: 'level_'+str(lvl-1),\r\n 1: 'level_'+str(lvl)}, inplace=True)\r\n df = df.merge(loc_pairs, on='level_'+str(lvl-1), how='left')\r\n df['leaf_node'] = df.apply(lambda x:\r\n next(l for l in reversed(x) if pd.notnull(l)), axis=1)\r\n\r\n for c in df.columns:\r\n try:\r\n df[c] = df[c].astype('int')\r\n except:\r\n pass\r\n\r\n return df", "def getLevels(self):\n levels = self.levels.keys()\n levels.sort()\n a = str(levels)\n \n logger.info('[biospatial.gbif.taxonomy.NestedTaxonomy]\\n Available Levels %s' %a)\n return a", "def createHierarchy(self, hierarchy):\n self.tprint('create_bd_cell -type hier ' + 
hierarchy)", "def _generate_hierarchy_string(self, skeleton):\n hierarchy_string = \"HIERARCHY\\n\"\n hierarchy_string += self._generate_joint_string(skeleton.root, skeleton, 0)\n return hierarchy_string", "def consecutive_tree_levels(self):\n lst = [self.root.value]\n\n def __fill_consecutive_tree_levels(parent=self.root):\n \"\"\" Fills a list of consecutive connections in it, in other words,\n traverses a tree from left to right \"\"\"\n for child in parent.children:\n lst.append(child.value)\n __fill_consecutive_tree_levels(parent=child) # call recursively\n\n __fill_consecutive_tree_levels()\n return lst", "def update_level(self):\n level = 1\n assigned_levels = set([])\n just_assigned = set([])\n for root in self.roots:\n for child in root.children:\n if child in just_assigned:\n continue\n child.level = level\n if len(child.children) == 0:\n continue\n just_assigned.add(child)\n assigned_levels = assigned_levels.union(just_assigned)\n\n level += 1\n leaves = [c for c in self.collectors if len(c.children) == 0]\n len_non_leaves = len(self.collectors) - len(leaves)\n self.update_level_for_non_leaves(\n level, assigned_levels, just_assigned, len_non_leaves\n )", "def print_level(self, node , level):\n if node is None and level == 1: \n self.level.append(None)\n elif node != None:\n # set the root level as the base case\n if level == 1: \n self.level.append(node)\n elif level > 1 : \n self.print_level(node.left , level - 1) \n self.print_level(node.right , level - 1) \n return self.level", "def get_nodes_on_level(self, level):\n nodes = [self.root]\n for i in range(level):\n children_nodes = []\n while nodes:\n node = nodes[0]\n children_nodes.append(node.left if node else None)\n children_nodes.append(node.right if node else None)\n nodes.remove(node)\n nodes = children_nodes\n return nodes", "def build_hierarchy_from_id_lookup(id_lookup_file=\"idlookups.csv\"):\n df_id_lookups = pd.read_csv(id_lookup_file, index_col=0)\n\n # The naming convention separates layers of the hierarchy with a colon ':', so we can break this into a list of descendents, and calculate the depth of the tree.\n df_id_lookups[\"parsed_name\"] = df_id_lookups.name.apply(lambda s: s.split(\": \"))\n df_id_lookups[\"depth\"] = df_id_lookups.parsed_name.apply(lambda d: len(d))\n\n # The two top nodes \"Biota\" and \"Physical\" are not prepended to their children, so we need to do this manually.\n # Manually define biota and physical children\n biota_kids = [\n \"Worms\",\n \"Sponges\",\n \"Seagrasses\",\n \"Molluscs\",\n \"Macroalgae\",\n \"Jellies\",\n \"Fishes\",\n \"Echinoderms\",\n \"Crustacea\",\n \"Cnidaria\",\n \"Bryozoa\",\n \"Bioturbation\",\n \"Bacterial mats\",\n \"Ascidians\",\n ]\n\n physical_kids = [\"Substrate\"]\n\n # Prepend them to name lists, and add to depth.\n biota_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in biota_kids)\n df_id_lookups.loc[biota_inds, \"depth\"] += 1\n df_id_lookups.loc[biota_inds, \"parsed_name\"] = df_id_lookups.loc[biota_inds, \"parsed_name\"].apply(\n lambda d: [\"Biota\"] + d\n )\n\n physical_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in physical_kids)\n df_id_lookups.loc[physical_inds, \"depth\"] += 1\n df_id_lookups.loc[physical_inds, \"parsed_name\"] = df_id_lookups.loc[physical_inds, \"parsed_name\"].apply(\n lambda d: [\"Physical\"] + d\n )\n\n # Create columns for ancestor and descendant lists.\n df_id_lookups[\"child_name\"] = df_id_lookups.parsed_name.apply(lambda d: d[-1])\n\n df_id_lookups[\"ancestor_id_list\"] = [get_ancestor_ids(d, 
df_id_lookups) for d in df_id_lookups.index]\n\n df_id_lookups[\"descendant_id_list\"] = [get_descendant_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n # Create a multilabel, one hot encoded bit vector for each class, taking into account the hierarchy of ancestors, and unspecified descendants.\n # We now want to represent this class hierarchy as a bit-vector. Each class index has a unique bit in the vector. A root level class will turn on a single bit. A depth 4 class will turn on 4 bits.\n df_id_lookups[\"bit_vector\"] = [get_bit_vector(d, df_id_lookups) for d in df_id_lookups.index]\n df_id_lookups\n\n return df_id_lookups", "def getHierarchies(unique_name=None):", "def make_level(self):\n level = Level(self.data['levelname'])\n self._set_common_attributes(level)\n self.level = level\n return level", "def get_tree_data(self, levels=float('inf')):\n if self.kind_id == content_kinds.TOPIC:\n node_data = {\n \"title\": self.title,\n \"kind\": self.kind_id,\n \"node_id\": self.node_id,\n \"studio_id\": self.id,\n }\n children = self.children.all()\n if levels > 0:\n node_data[\"children\"] = [c.get_tree_data(levels=levels - 1) for c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {\n \"title\": self.title,\n \"kind\": self.kind_id,\n \"count\": self.assessment_items.count(),\n \"node_id\": self.node_id,\n \"studio_id\": self.id,\n }\n return {\n \"title\": self.title,\n \"kind\": self.kind_id,\n \"file_size\": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],\n \"node_id\": self.node_id,\n \"studio_id\": self.id,\n }", "def get_levels(self, nodes, boundary_nodes): # pylint: disable=R0201\n # current distance = collection of all node ids with same minimum distance\n # from the outermost boundary\n levels = []\n\n nodes_with_same_distance = boundary_nodes\n distance = 0\n # keep the process going until we have gone through all the possible distances\n while nodes_with_same_distance:\n next_nodes_with_same_distance = []\n for node_id in nodes_with_same_distance:\n nodes[node_id]['distance'] = distance # this is only needed for 1st step\n # relations = all nodes connected to the node in question by a single edge\n for related_node_id in nodes[node_id]['relations']:\n # if we have not labeled this node yet, that means it must\n # have a distance 1 greater than the nodes we're iterating over\n if nodes[related_node_id].get('distance') is None:\n nodes[related_node_id]['distance'] = distance + 1\n next_nodes_with_same_distance.append(related_node_id)\n\n level_cycles, level_paths = self.identify_level_elements(nodes, nodes_with_same_distance) # pylint: disable=C0301\n levels.append({\n 'node_ids': nodes_with_same_distance,\n 'cycles': level_cycles,\n 'paths': level_paths\n })\n distance += 1\n nodes_with_same_distance = next_nodes_with_same_distance\n\n return levels", "def get_levels(self, arcs: List[Dict[str, Any]]) -> Dict[Tuple[int, int, str], int]:\n arcs = [dict(t) for t in {tuple(sorted(arc.items())) for arc in arcs}]\n length = max([arc[\"end\"] for arc in arcs], default=0)\n max_level = [0] * length\n levels = {}\n for arc in sorted(arcs, key=lambda arc: arc[\"end\"] - arc[\"start\"]):\n level = max(max_level[arc[\"start\"] : arc[\"end\"]]) + 1\n for i in range(arc[\"start\"], arc[\"end\"]):\n max_level[i] = level\n levels[(arc[\"start\"], arc[\"end\"], arc[\"label\"])] = level\n return levels", "def create_tree(self):\n feature_indices = []\n for i in self.estimator.tree_.feature:\n n_features = self.n_features\n if 
self.n_features > 1 or (self.n_features == 1 and i >= 0):\n feature_indices.append([str(j) for j in range(n_features)][i])\n indentation = 1 if self.target_language in ['java', 'js',\n 'php', 'ruby'] else 0\n return self.create_branches(\n self.estimator.tree_.children_left,\n self.estimator.tree_.children_right,\n self.estimator.tree_.threshold,\n self.estimator.tree_.value,\n feature_indices, 0, indentation)", "def level_order_traversal(self):\r\n level_queue = [self]\r\n next_level_queue = []\r\n curr_level = 0\r\n print (self.root)\r\n while level_queue:\r\n curr_node = level_queue.pop(0)\r\n if curr_node.left:\r\n next_level_queue.append(curr_node.left)\r\n if curr_node.right:\r\n next_level_queue.append(curr_node.right)\r\n if not level_queue:\r\n for nodes in next_level_queue:\r\n print (nodes.root , ' ' , end = '')\r\n print ()\r\n curr_level += 1\r\n level_queue = next_level_queue[:]\r\n next_level_queue = []", "def render_pyramid(pyr, levels):\n positionLst = []\n finalLst = []\n if levels > len(pyr):\n print(\"error. number of levels to display is more than max_levels\")\n width = 0\n\n for i in range(levels):\n # streching each layer\n pyr[i] = strech_helper(pyr[i])\n width += pyr[i].shape[1]\n positionLst.append((pyr[i].shape[0], pyr[i].shape[1]))\n\n for i in range(levels):\n zeros = np.zeros(shape=(pyr[0].shape[0], pyr[i].shape[1]))\n zeros[:positionLst[i][0], :positionLst[i][1]] = pyr[i]\n finalLst.append(zeros)\n res = np.concatenate(finalLst, axis=1)\n return res", "def create_level_maps(max_depth, level_tags, summary_tags):\n\n level_maps = []\n for i in range(0, max_depth - 1):\n level_map = dict()\n level = max_depth - i\n for summary in summary_tags[i]:\n all_sub_tags = [att for att in level_tags[i] if summary[:-4] == \".\".join(att.split('.')[:-1])]\n matching_tag_above = [att for att in level_tags[i + 1] if summary[:-4] == att]\n level_map[summary] = all_sub_tags + matching_tag_above\n\n level_maps.append(level_map)\n\n return level_maps", "def levels(root):\n # if Tree is empty\n if not root:\n return 0 \n\n #if leaf node return 1 (Bcz. 
leaf node is present at level 1)\n if root.left==None and root.right==None:\n return 1\n\n #recursively compute the levels of left and right subtree \n left_subtree_levels=levels(root.left)\n right_subtree_levels=levels(root.right)\n\n #compute the overall levels of tree\n total_levels =max(left_subtree_levels,right_subtree_levels)+1\n\n return total_levels", "def _set_level_depth(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.id, rec.relationship, typedef, invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)", "def hierarchy(self):\n return [self] + (self.parent.hierarchy if self.parent else [])", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n h = height(root) \n for i in range(1, h+1): \n printGivenLevel(root, i) \n print(\"=========================================================\")", "def CalculateLevel(self, item, dc, level, y, x_colstart):\r\n\r\n # calculate position of vertical lines\r\n x = x_colstart + _MARGIN # start of column\r\n if self.HasAGWFlag(wx.TR_LINES_AT_ROOT):\r\n x += _LINEATROOT # space for lines at root\r\n if self.HasButtons():\r\n x += (self._btnWidth-self._btnWidth2) # half button space\r\n else:\r\n x += (self._indent-self._indent/2)\r\n \r\n if self.HasAGWFlag(wx.TR_HIDE_ROOT):\r\n x += self._indent * (level-1) # indent but not level 1\r\n else:\r\n x += self._indent * level # indent according to level\r\n \r\n # a hidden root is not evaluated, but its children are always\r\n if self.HasAGWFlag(wx.TR_HIDE_ROOT) and (level == 0):\r\n # a hidden root is not evaluated, but its\r\n # children are always calculated\r\n children = item.GetChildren()\r\n count = len(children)\r\n level = level + 1\r\n for n in xrange(count):\r\n y = self.CalculateLevel(children[n], dc, level, y, x_colstart) # recurse\r\n \r\n return y\r\n\r\n self.CalculateSize(item, dc)\r\n\r\n # set its position\r\n item.SetX(x)\r\n item.SetY(y)\r\n y += self.GetLineHeight(item)\r\n\r\n if not item.IsExpanded():\r\n # we don't need to calculate collapsed branches\r\n return y\r\n\r\n children = item.GetChildren()\r\n count = len(children)\r\n level = level + 1\r\n for n in xrange(count):\r\n y = self.CalculateLevel(children[n], dc, level, y, x_colstart) # 
recurse\r\n \r\n return y", "def derive_class_hierarchy():\n logger.info('Deriving class hierarchy ...')\n data = statistics.get_json_data('classes')\n\n hierarchy = defaultdict(dict)\n keys = ['i', 's', 'ai', 'as', 'sc', 'sb', 'r']\n\n for cid in data:\n for key in keys:\n if key in data[cid] and data[cid][key]:\n hierarchy[cid][key] = data[cid][key]\n\n statistics.update_json_data('classes/hierarchy', hierarchy)\n statistics.update_split_json_data('classes/hierarchy', hierarchy, 1000)", "def set_depth(self):\n if self.depth != None:\n return\n if not self.parents:\n self.depth = 0\n return\n for parent in self.parents:\n parent.set_depth()\n \n depths = [parent.depth for parent in self.parents]\n self.depth = max(depths) + 1", "def getHierarchy(unique_name):", "def getHierarchy(unique_name):", "def getHierarchy(unique_name):", "def _build_tree(nodes):\n\n tree = []\n\n for trace_id in nodes:\n node = nodes[trace_id]\n node.setdefault(\"children\", [])\n parent_id = node[\"parent_id\"]\n if parent_id in nodes:\n nodes[parent_id].setdefault(\"children\", [])\n nodes[parent_id][\"children\"].append(node)\n else:\n tree.append(node) # no parent => top-level node\n\n for node in nodes:\n nodes[node][\"children\"].sort(key=lambda x: x[\"info\"][\"started\"])\n\n return sorted(tree, key=lambda x: x[\"info\"][\"started\"])", "def create(self, title: str, levels: list, always_included: bool, elasticube: str = None) -> Resource:\n elasticube = elasticube if elasticube else self._elasticube\n data = {'title': title, 'levels': levels, 'alwaysIncluded': always_included}\n\n content = self._api.post(f'elasticubes/localhost/{elasticube}/hierarchies', data=data)\n return Hierarchy(self._api, content, elasticube)", "def generate_tree(csv_data: List[List[str]], order: List[str]) -> List[NodeList]:\n tree = []\n\n for row in csv_data:\n branch = generate_branch(row, order)\n if not branch:\n continue\n\n branch[0], root_result = level_exists(branch[0], tree)\n\n for i in range(len(branch) - 1):\n branch[i + 1], result = level_exists(branch[i + 1], branch[i][\"children\"])\n if not result:\n branch[i][\"children\"].append(branch[i + 1])\n\n if not root_result:\n tree.append(branch[0])\n return tree", "def GenerateHierarchies(self,id_range):\n classed = []\n file_list = self.GetFileList(id_range)\n if file_list:\n for fl in file_list:\n cls = self.GetClassesFromFile(fl)\n if cls:\n for c in cls:\n self.classes.append(c)\n\n for c in self.classes:\n if not self.Inherits(c):\n self.base_cls.append(c)\n self.classes.remove(c)\n\n for c in self.base_cls:\n kids = self.FindChilds(c)\n if c not in classed:\n root = self.AppendItem(self.root,self.CleanName(c))\n classed.append(c)\n if kids:\n for n in kids:\n root2 = self.AppendItem(root,self.CleanName(n))\n kids_kids = self.FindChilds(n)\n i = 0\n if kids_kids:\n kids_len = len(kids_kids)\n while kids_kids:\n try:\n kid = kids_kids[i]\n except: pass\n has_kid = self.FindChilds(kid)\n if not has_kid and i == kids_len: break\n i+=1\n root3 = self.AppendItem(root2,self.CleanName(kid))\n\n\n self.classes = []", "def build_tree(elem, level = 1024, remove_root = 0):\n if level <= 0:\n return None\n level -= 1\n\n lista = elem.objectValues()\n node = {}\n children = []\n\n for i in lista:\n result = (build_tree(i, level))\n if result:\n children.append(result)\n\n if remove_root:\n return children\n else:\n node[\"title\"] = get_id(elem)\n node[\"children\"] = []\n\n if len(lista):\n node[\"key\"] = get_id(elem)\n node[\"isFolder\"] = True\n\n if not 
len(node[\"children\"]):\n node[\"isLazy\"] = True\n\n node[\"children\"] = children\n\n return node", "def rollup(self, path, level=None):\n\n if level:\n last = self.level_index(level) + 1\n if last > len(path):\n raise HierarchyError(\"Can not roll-up: level '%s' – it is \"\n \"deeper than deepest element of path %s\" %\n (str(level), path))\n else:\n if len(path) > 0:\n last = len(path) - 1\n else:\n last = None\n\n if last is None:\n return []\n else:\n return path[0:last]", "def __init__(self, level):\n self.level = level\n self.my_map = {}\n self.my_level = []\n self.my_grid = []", "def build_upwards(data, dim_index, B, NodeClass, serializer,\n children=None,\n children_intervals=None):\n is_leaf = NodeClass == RangeLeaf\n # if this is the lowest level, sort by keys and set the children\n if is_leaf:\n data.sort(key=lambda dp: dp[dim_index][1])\n # set starts and ends for each child\n # a child is in the form: (child_data, start, end)\n # child_data is (id, id)\n # start and end are indices into the larger data list\n children = [(data_item[-1], data_item[dim_index][1]) for data_item in data]\n\n parents = []\n parent_intervals = []\n\n num_clusters = len(children) / B\n if len(children) % B != 0:\n num_clusters += 1\n\n # data[0][0][0] is the dimension name unfortunately...\n dim = data[0][dim_index][0]\n prev_leaf = None\n for i in range(num_clusters):\n # If there is more than one parent's worth of children, chop\n # off the first B in a chunk\n cluster = children[i * B:(i + 1) * B]\n cluster_start = i * B\n cluster_end = i * B + len(cluster)\n if not is_leaf:\n cluster_start = children_intervals[cluster_start][0]\n cluster_end = children_intervals[cluster_end - 1][1]\n\n # serialize parent's linked tree in next dimension\n linked_root = None\n # last element in data[0] is the id, if we're more than one before\n # that, there's still another dimension to build a tree for\n if dim_index < len(data[0]) - 2:\n # build the next dimension\n linked_root = build_upwards(data[cluster_start : cluster_end],\n dim_index + 1, B, RangeLeaf, serializer)\n\n if is_leaf:\n # if we're at the bottom of the last tree, pass in the full data item\n if dim_index == len(data[0]) - 2:\n parent = NodeClass(cluster, linked_root, dim, prev_leaf,\n data[cluster_start : cluster_end])\n else:\n parent = NodeClass(cluster, linked_root, dim, prev_leaf)\n else:\n parent = NodeClass(cluster, linked_root, dim, serializer)\n\n # then serialize parent\n serializer.dumps(parent)\n\n if is_leaf:\n prev_leaf = parent.pos\n\n # and add it to our collection\n parents.append((parent.pos, parent.min, parent.max))\n parent_intervals.append((cluster_start, cluster_end))\n i += 1\n\n # If we ended up with more than one parent, we need to give them\n # their own parents. 
Recurse.\n if len(parents) > 1:\n # On the recursive steps, the parents are always RangeNodes.\n return build_upwards(data, dim_index, B, RangeNode, serializer,\n parents, parent_intervals)\n\n return parents[0][0]", "def generate_level(level):\n seed = level * 69420 # multiply by 69420 to not have the seeds too close to each other\n random.seed(seed)\n dimensions = get_map_size(level)\n level_map = np.full(dimensions, -1)\n while -1 in level_map:\n choice = random.choice(np.argwhere(level_map == -1))\n next_index = (choice[0], choice[1])\n # get indices of the tiles next to the current index\n left_index, up_index, right_index, down_index = get_direction_indices(next_index)\n left = tile_needs_connection(left_index, level_map, has_connection_right)\n up = tile_needs_connection(up_index, level_map, has_connection_down)\n right = tile_needs_connection(right_index, level_map, has_connection_left)\n down = tile_needs_connection(down_index, level_map, has_connection_up)\n level_map[next_index] = get_tile(left, up, right, down)\n return un_solve(level_map)", "def leaves_by_depth(t, leaves=None, depth=0):\n if leaves is None:\n leaves = {}\n if is_leaf(t):\n leaves.setdefault(depth, []).append(root(t))\n else:\n for b in branches(t):\n leaves_by_depth(b, leaves, depth+1)\n return leaves", "def build_tree(self):\n stack = []\n self._handle_solo_node_case()\n while self.root_hash == None:\n if len(stack) >= 2 and stack[-1].height == stack[-2].height:\n mom = stack.pop()\n dad = stack.pop()\n child_hash = self.sha256Sum(mom.hash + dad.hash)\n child = self.Node(mom, dad, child_hash)\n self.node_table[child_hash] = child\n mom.child = child\n dad.child = child\n\n if child.height == self.max_height:\n self.root_hash = child.hash\n\n stack.append(child)\n elif len(self.leaves) > 0:\n leaf = self.leaves.pop()\n self.node_table[leaf.hash] = leaf\n stack.append(leaf)\n # Handle case where last 2 nodes do not match in height by \"graduating\"\n # last node\n else:\n stack[-1].height += 1\n self.is_built = True", "def get_hierarchy(self):\n hierarchy = [self]\n\n for component in reversed(sorted(self.components.keys())):\n hierarchy.extend(component.get_hierarchy())\n\n return hierarchy", "def __init__(self, dim, points, parent, root, rec):\n import numpy as np\n \n ##Assumption: 0 - left and down, 1 - right and up, 0 - x, 1 - y \n self.node = root\n self.parent = parent\n self.points = points\n self.children = [None, None]\n self.childlist = [[], []]\n self.rec = rec\n \n \n if(parent == None):\n self.depth = 0\n else:\n self.depth = self.parent.depth + 1\n \n self.dim = self.depth%dim\n \n if(self.parent == None): #ROOT - 0, BRANCH - 1, LEAF - 2\n self.type = \"ROOT\"\n \n elif(len(self.points) == 0):\n self.type = \"LEAF\"\n \n else:\n self.type = \"BRANCH\"\n \n self.update_rec()\n self.subdivide(dim)", "def _genealogy(self, rules):\n\n tree = { t:{'level':0, 'base':None} for t in self.schema.keys() }\n for t in tree:\n log.debug(\"checking ancestors of %s ...\", t)\n for r in rules:\n if re.match(r[0], t):\n b = r[1]\n if b == t:\n b = None\n tree[t]['base'] = b\n c = t\n l = tree[c]['level']\n while b is not None:\n log.debug(\" ... 
%s is derived from %s\", c, b)\n c = b\n try:\n tree[c]\n except KeyError:\n raise GenealogyError(\"Unknown base type '%s' \"\n \"in rules.\" % c)\n if c == t:\n raise GenealogyError(\"Loop in the genealogy tree \"\n \"detected.\")\n l = max(tree[c]['level'], l+1)\n tree[c]['level'] = l\n b = tree[c]['base']\n break\n\n # Check that there is only one root in the tree\n if len([t for t in tree if tree[t]['base'] is None]) != 1:\n raise GenealogyError(\"No unique root of genealogy tree.\")\n\n return tree", "def collapse_level(level):\n\n # type_set = set(map(type, level))\n # if type_set in set([int]):\n if all(map(lambda x: isinstance(x, (int, long)), level)):\n\n # If a level only contains consecutive integers, convert it into an\n # interval:\n level.sort()\n if cls.are_consecutive(level):\n return ['[%s:%s]' % (min(level), max(level)+1)]\n\n # If a level contains nonconsecutive integers, convert it into a\n # list:\n else:\n return ['['+','.join([str(i) for i in level])+']']\n\n elif all(map(lambda x: isinstance(x, basestring), level)):\n # elif type_set in set([str]):\n if len(level) == 1:\n return level\n else:\n return ['['+','.join([s for s in level])+']']\n else:\n level_int = sorted([x for x in level if isinstance(x, (int, long))])\n level_str = sorted([x for x in level if isinstance(x, basestring)])\n return collapse_level(level_int)+collapse_level(level_str)", "def setLevels(self, levels, update=True):\n if self._xp is None:\n self.levels = levels\n self._defferedLevels = levels\n return\n if levels is not None:\n levels = self._xp.asarray(levels)\n self.levels = levels\n self._effectiveLut = None\n if update:\n self.updateImage()", "def levels(self):\n return list(self._levels.values())", "def hierarchy(self):\n list = self.parent.hierarchy if self.parent else []\n list.append(self)\n return list", "def _get_level_ordering(self):\n # First, get a level for all layers:\n levels = {}\n for layer in self._layers:\n level = max(\n [levels[lay.name] for lay in self.incoming_layers(layer.name)] + [-1]\n )\n levels[layer.name] = level + 1\n max_level = max(levels.values())\n ordering = []\n for i in range(max_level + 1): # input to output\n layer_names = [\n layer.name for layer in self._layers if levels[layer.name] == i\n ]\n ordering.append(\n [\n (name, False, [x.name for x in self.incoming_layers(name)])\n for name in layer_names\n ]\n ) # (going_to/layer_name, anchor, coming_from)\n # promote all output banks to last row:\n for level in range(len(ordering)): # input to output\n tuples = ordering[level]\n index = 0\n for (name, anchor, none) in tuples[:]:\n if self._get_layer_type(name) == \"output\":\n # move it to last row\n # find it and remove\n ordering[-1].append(tuples.pop(index))\n else:\n index += 1\n # insert anchor points for any in next level\n # that doesn't go to a bank in this level\n # order_cache = {}\n for level in range(len(ordering)): # input to output\n tuples = ordering[level]\n for (name, anchor, fname) in tuples:\n if anchor:\n # is this in next? 
if not add it\n next_level = [\n (n, anchor) for (n, anchor, hfname) in ordering[level + 1]\n ]\n if (\n name,\n False,\n ) not in next_level: # actual layer not in next level\n ordering[level + 1].append(\n (name, True, fname)\n ) # add anchor point\n else:\n pass # finally!\n else:\n # if next level doesn't contain an outgoing\n # connection, add it to next level as anchor point\n for layer in self.outgoing_layers(name):\n next_level = [\n (n, anchor) for (n, anchor, fname) in ordering[level + 1]\n ]\n if (layer.name, False) not in next_level:\n ordering[level + 1].append(\n (layer.name, True, name)\n ) # add anchor point\n ordering = self._optimize_ordering(ordering)\n return ordering", "def draw_tree(self):\n\n print \"--- \" + str(self.name)\n \n def draw_child_tree(current, depth):\n \n for c in current.children:\n print depth * \" \" + \"|-- \" + str(c.name)\n if hasattr(c, 'children'):\n draw_child_tree(c, depth + 1)\n \n draw_child_tree(self, 1)\n \n return", "def _get_levels_lines(self, get_collisions=False):\n lvl_list = []\n lns_list = []\n col_list = []\n for ion in self.ions:\n\n ch_ion = convert_species_tuple2chianti_str(ion)\n reader = ChiantiIonReader(ch_ion)\n\n # Do not keep levels if lines are not available.\n try:\n lvl = reader.levels\n lns = reader.lines\n\n except ChiantiIonReaderError:\n logger.info(f'Missing levels/lines data for `{ch_ion}`.')\n continue\n\n lvl['atomic_number'] = ion[0]\n lvl['ion_charge'] = ion[1]\n\n # Indexes must start from zero\n lvl.index = range(0, len(lvl))\n lvl.index.name = 'level_index'\n lvl_list.append(reader.levels)\n\n lns['atomic_number'] = ion[0]\n lns['ion_charge'] = ion[1]\n lns_list.append(lns)\n\n if get_collisions:\n try:\n col = reader.collisions\n col['atomic_number'] = ion[0]\n col['ion_charge'] = ion[1]\n col_list.append(col)\n\n except ChiantiIonReaderError:\n logger.info(f'Missing collisional data for `{ch_ion}`.')\n\n levels = pd.concat(lvl_list, sort=True)\n levels = levels.rename(columns={'J': 'j'})\n levels['method'] = None\n levels['priority'] = self.priority\n levels = levels.reset_index()\n levels = levels.set_index(\n ['atomic_number', 'ion_charge', 'level_index'])\n levels = levels[['energy', 'j', 'label', 'method', 'priority']]\n\n lines = pd.concat(lns_list, sort=True)\n lines = lines.reset_index()\n lines = lines.rename(columns={'lower_level_index': 'level_index_lower',\n 'upper_level_index': 'level_index_upper',\n 'gf_value': 'gf'})\n\n # Kurucz levels starts from zero, Chianti from 1.\n lines['level_index_lower'] = lines['level_index_lower'] - 1\n lines['level_index_upper'] = lines['level_index_upper'] - 1\n\n lines = lines.set_index(['atomic_number', 'ion_charge',\n 'level_index_lower', 'level_index_upper'])\n lines['energy_upper'] = None\n lines['energy_lower'] = None\n lines['j_upper'] = None\n lines['j_lower'] = None\n lines = lines[['energy_upper', 'j_upper', 'energy_lower', 'j_lower',\n 'wavelength', 'gf']]\n\n lines['wavelength'] = u.Quantity(lines['wavelength'], u.AA).to('nm').value\n\n col_columns = ['temperatures', 'collision_strengths', 'gf', 'energy', 'ttype', 'cups']\n if get_collisions:\n collisions = pd.concat(col_list, sort=True)\n collisions = collisions.reset_index()\n collisions = collisions.rename(columns={'lower_level_index': 'level_index_lower',\n 'upper_level_index': 'level_index_upper',\n 'gf_value': 'gf',})\n collisions['level_index_lower'] -= 1\n collisions['level_index_upper'] -= 1\n collisions = collisions.set_index(['atomic_number', 'ion_charge',\n 'level_index_lower', 
'level_index_upper'])\n collisions = collisions[col_columns]\n self.collisions = collisions\n\n self.levels = levels\n self.lines = lines\n self.version = versionRead()", "def parse(self,\n lvl_list: List[BeautifulSoup]):\n # isLgl (only mention)\n # lvlText (val=\"some text %num some text\")\n # numFmt (val=\"bullet\", \"decimal\")\n # pPr -> ind\n # pStyle -> pPr\n # rPr -> sz, bold, italic, underlined\n # start (w:val=\"1\")\n # suff (w:val=\"nothing\", \"tab\" - default, \"space\")\n # lvlRestart (w:val=\"0\")\n # restart - startOverride for each level\n for lvl in lvl_list:\n ilvl = lvl['w:ilvl']\n if ilvl not in self.levels:\n self.levels[ilvl] = {}\n\n if lvl.lvlText and lvl.lvlText['w:val']:\n # some characters in bullets are displayed incorrectly\n # replace them with the unicode equivalent\n # use mapping between hexadecimal code of windows characters and unicode characters\n # if hexadecimal code was found in mapping dictionary use it's unicode equivalent\n if hex(ord(lvl.lvlText['w:val'][0])) in mapping:\n self.levels[ilvl]['lvlText'] = mapping[hex(ord(lvl.lvlText['w:val'][0]))]\n else:\n self.levels[ilvl]['lvlText'] = lvl.lvlText['w:val']\n\n elif 'lvlText' not in self.levels[ilvl]:\n self.levels[ilvl]['lvlText'] = \"\"\n\n if lvl.isLgl:\n self.levels[ilvl]['numFmt'] = 'decimal'\n else:\n if lvl.numFmt:\n self.levels[ilvl]['numFmt'] = lvl.numFmt['w:val']\n elif 'numFmt' not in self.levels[ilvl]:\n self.levels[ilvl]['numFmt'] = 'none'\n\n if lvl.start:\n self.levels[ilvl]['start'] = int(lvl.start['w:val'])\n elif 'start' not in self.levels[ilvl]:\n self.levels[ilvl]['start'] = 1\n\n if lvl.lvlRestart:\n self.levels[ilvl]['lvlRestart'] = bool(int(lvl.lvlRestart['w:val']))\n elif 'lvlRestart' not in self.levels[ilvl]:\n self.levels[ilvl]['lvlRestart'] = True\n if 'restart' not in self.levels[ilvl]:\n self.levels[ilvl]['restart'] = self.properties['restart']\n\n if lvl.suff:\n self.levels[ilvl]['suff'] = getSuffix[lvl.suff['w:val']]\n elif 'suff' not in self.levels[ilvl]:\n self.levels[ilvl]['suff'] = getSuffix[\"tab\"]\n\n # extract information from paragraphs and runs properties\n if lvl.pStyle:\n self.levels[ilvl]['styleId'] = lvl.pStyle['w:val']\n elif 'styleId' not in self.levels[ilvl]:\n self.levels[ilvl]['styleId'] = None\n\n # paragraph -> run\n if lvl.pPr:\n self.levels[ilvl]['pPr'] = lvl.pPr\n elif 'pPr' not in self.levels[ilvl]:\n self.levels[ilvl]['pPr'] = None\n\n if lvl.rPr:\n self.levels[ilvl]['rPr'] = lvl.rPr\n elif 'rPr' not in self.levels[ilvl]:\n self.levels[ilvl]['rPr'] = None\n\n if lvl.startOverride:\n self.levels[ilvl]['restart'] = True\n self.levels[ilvl]['start'] = int(lvl.startOverride['w:val'])", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up", "def get_go_hierarchy(self, go_id=None, go_cat=\"BP\"):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_parents()\n set_children = rec.get_all_children()\n list_go_parents = self._go_terms_to_list(go_cat=go_cat,\n set_go_terms=set_parents,\n relation=\"parent\")\n list_go_children = self._go_terms_to_list(go_cat=go_cat,\n set_go_terms=set_children,\n relation=\"child\")\n list_go_terms = list_go_parents + list_go_children\n columns = [\"GO_term\", \"level\", \"depth\", self.dict_go_ns[go_cat], \"relation\"]\n df_hierarchy = pd.DataFrame(list_go_terms, columns=columns)\n 
df_hierarchy.sort_values(by=\"depth\", inplace=True)\n df_hierarchy.reset_index(inplace=True, drop=True)\n return df_hierarchy", "def levelOrder(self, root: 'Node') -> List[List[int]]:\n if not root: return []\n level = []\n waiting = []\n result = []\n level.append(root)\n while level:\n current = []\n while level:\n tmp = level.pop(0)\n if not tmp:\n continue\n current.append(tmp.val)\n waiting.append(tmp)\n if len(current) > 0:\n result.append(current)\n while waiting:\n tmp = waiting.pop(0)\n for ch in tmp.children:\n level.append(ch)\n return result", "def test_level_depth(chikin):\n assert chikin.depth == 0\n assert str(chikin.section) == 'Chikin Tales'\n assert chikin.section.depth == 1\n assert chikin.section.subsection.depth == 2", "def fullfact(levels):\n n = len(levels) # number of factors\n nb_lines = np.prod(levels) # number of trial conditions\n H = np.zeros((nb_lines, n))\n \n level_repeat = 1\n range_repeat = np.prod(levels)\n for i in range(n):\n range_repeat //= levels[i]\n lvl = []\n for j in range(levels[i]):\n lvl += [j]*level_repeat\n rng = lvl*range_repeat\n level_repeat *= levels[i]\n H[:, i] = rng\n \n return H", "def construct_hierarchy_tree(top_down_hierarchy_dict):\n if not top_down_hierarchy_dict:\n return None\n\n all_employees = {}\n\n # do a breadth first search (BFS) on the hierarchy using a queue\n # the queue contains the employees' names initialized to the top most employee\n queue = [next(iter(top_down_hierarchy_dict))]\n\n # the top most supervisor\n root_employee = None\n\n while queue:\n employee_name = queue.pop(0)\n if employee_name not in all_employees:\n # this is for the root supervisor (E.g. CEO)\n supervisor_employee = Employee(name=employee_name)\n root_employee = supervisor_employee\n all_employees[employee_name] = supervisor_employee\n else:\n supervisor_employee = all_employees[employee_name]\n\n if employee_name not in top_down_hierarchy_dict:\n # we have reached the end of this branch\n continue\n else:\n # get the new top_down_hierarchy_dict with the current employee as the top most\n top_down_hierarchy_dict = top_down_hierarchy_dict[employee_name]\n\n for subordinate_name in top_down_hierarchy_dict:\n subordinate_employee = Employee(name=subordinate_name, supervisor=supervisor_employee)\n supervisor_employee.add_subordinate(subordinate_employee)\n all_employees[subordinate_name] = subordinate_employee\n queue.append(subordinate_name)\n return root_employee", "def resolve_top_level(*_):\n # pylint: disable=attribute-defined-outside-init\n data = TopLevel()\n subleaf1 = SubLeaf()\n subleaf1.value = \"subleaf1\"\n subleaf2 = SubLeaf()\n subleaf2.value = \"subleaf2\"\n leaf = Leaf()\n leaf.leaflets = [subleaf1, subleaf2]\n leaf.value = \"some leaf value\"\n data.leaf = leaf\n data.name = \"top level name\"\n return data", "def build_kdtree(points, depth=0):\n n = len(points) - 1\n\n if n <= 0:\n return None\n\n axis = depth % k\n\n sorted_points = sorted(points, key=lambda point: point[axis])\n\n return {\n 'point': sorted_points[n / 2],\n 'left': build_kdtree(sorted_points[:n / 2], depth + 1),\n 'right': build_kdtree(sorted_points[n / 2 + 1:], depth + 1)\n }", "def build_kdtree(points, depth=0):\n n = len(points) - 1\n\n if n <= 0:\n return None\n\n axis = depth % k\n\n sorted_points = sorted(points, key=lambda point: point[axis])\n\n return {\n 'point': sorted_points[n / 2],\n 'left': build_kdtree(sorted_points[:n / 2], depth + 1),\n 'right': build_kdtree(sorted_points[n / 2 + 1:], depth + 1)\n }", "def get_levels(std0, slope, nsigma):\n 
nslope = nsigma * slope\n levels = [0]\n while levels[-1] <= 1:\n levels.append((levels[-1] * (1 + nslope) + 2 * nsigma * std0) / (1 - nslope))\n levels.pop()\n return levels", "def make_tree(self, strings):\n # start from tree root\n root = self.tree()\n # mark if letter is the end of string or just its part\n root['end'] = set()\n root['part'] = set(self.s_options_total)\n for idx, string in enumerate(strings):\n node = root\n for letter in string:\n new_node = node[letter]\n new_node.setdefault('end', set())\n new_node.setdefault('part', set()).add(idx)\n node = new_node\n node['end'].add(idx)\n node['part'].remove(idx)\n\n return root", "def __str__(self):\r\n levels = tuple(self.generate_levels())\r\n self.compute_representation_positions()\r\n levels_to_strings = self.represent_tree_levels(levels)\r\n branches = self.represent_tree_branches(levels)\r\n\r\n return \"\".join(\"\".join((level, \"\\n\\n\", branch))\r\n for (level, branch) in zip(levels_to_strings, branches))", "def levelOrder(self, root: TreeNode) -> List[List[int]]:\n\n result = []\n if(root is None):\n return result\n\n q = deque([root])\n while(q):\n n = len(q)\n level = []\n for i in range(0,n):\n f = q.popleft()\n level.append(f.val)\n\n if (f.left is not None):\n q.append(f.left)\n if (f.right is not None):\n q.append(f.right)\n\n if(len(level) > 0):\n result.append(level[:])\n level.clear()\n return result", "def nlevels(self) -> int:\n return len(self._levels)" ]
[ "0.69105655", "0.67230135", "0.6641817", "0.6612042", "0.6433254", "0.64241004", "0.6208278", "0.6171888", "0.60765696", "0.6048084", "0.60298026", "0.60158795", "0.600957", "0.5995847", "0.59677976", "0.59607244", "0.59351313", "0.5911113", "0.590964", "0.5883543", "0.58677894", "0.5827673", "0.5810181", "0.57778037", "0.57088494", "0.56941986", "0.5670666", "0.56647867", "0.56647867", "0.56510645", "0.5631713", "0.5607054", "0.558486", "0.5554946", "0.5505474", "0.54971975", "0.54957515", "0.5494431", "0.54787713", "0.5447251", "0.5424588", "0.5408791", "0.54012024", "0.53945744", "0.5390953", "0.53735", "0.53339595", "0.532181", "0.53040266", "0.52372855", "0.52327436", "0.5222856", "0.52083236", "0.51916355", "0.51880896", "0.5171283", "0.5161943", "0.5148866", "0.5123283", "0.511985", "0.51092064", "0.5100122", "0.5100122", "0.5100122", "0.5093612", "0.5086046", "0.50745296", "0.50677305", "0.5066123", "0.5048671", "0.50467086", "0.5044439", "0.50442547", "0.50311565", "0.50308317", "0.5027098", "0.5011309", "0.5010176", "0.50031", "0.5002744", "0.49926192", "0.49844655", "0.4979458", "0.4976266", "0.49743757", "0.49722964", "0.4970078", "0.49696505", "0.49683502", "0.49681625", "0.4960434", "0.49563825", "0.49474508", "0.49438062", "0.49438062", "0.49434477", "0.4936384", "0.49348506", "0.4933269", "0.49323112" ]
0.6078447
8
Make a hashable representation of an object for hashlib
def hashable(obj): return bytes(str(obj), "utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()", "def hash_obj(self, obj):\r\n md5er = hashlib.md5()\r\n update_hash(md5er, obj)\r\n return md5er.hexdigest()", "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def hash(obj, hash_name='md5', coerce_mmap=False):\n if 'numpy' in sys.modules:\n hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)\n else:\n hasher = Hasher(hash_name=hash_name)\n return hasher.hash(obj)", "def hash(obj, hash_name='md5', coerce_mmap=False):\r\n if 'numpy' in sys.modules:\r\n hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)\r\n else:\r\n hasher = Hasher(hash_name=hash_name)\r\n return hasher.hash(obj)", "def deep_hash(obj):\n pass", "def pickle_and_hash(obj: Any) -> str:\n try:\n s = dill.dumps(obj)\n except:\n raise UnpickleableError()\n\n return hashlib.sha512(s).hexdigest()", "def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def dict_hash(obj, start=''):\n h = hashlib.sha1(to_bytes(start))\n h.update(to_bytes(obj.__class__.__name__))\n if isinstance(obj, dict):\n for key, value in sorted(obj.items()):\n h.update(to_bytes(key))\n h.update(to_bytes(dict_hash(value)))\n elif isinstance(obj, (list, tuple)):\n for el in obj:\n h.update(to_bytes(dict_hash(el)))\n else:\n # basic types\n if isinstance(obj, bool):\n value = str(int(obj))\n elif isinstance(obj, (six.integer_types, float)):\n value = str(obj)\n elif isinstance(obj, (six.text_type, bytes)):\n value = obj\n elif obj is None:\n value = b''\n else:\n raise ValueError(\"Unsupported value type: %s\" % obj.__class__)\n h.update(to_bytes(value))\n return h.hexdigest()", "def hashcode(o):", "def hash_simple_obj_to_hex(obj):\n\n hash_ = sha256()\n try:\n update_hash(hash_, obj)\n except ValueError as e:\n raise ValueError(\"%s (full object was %r)\" % (e, obj))\n return hash_.hexdigest()", "def hash(self) -> bytes:", "def object_sha256(obj):\n\n return hashlib.sha256(json.dumps(obj).encode()).hexdigest()", "def hash_data(obj):\n collect = sha1()\n for text in bytes_iter(obj):\n if isinstance(text, six.text_type):\n text = text.encode('utf-8')\n collect.update(text)\n return collect.hexdigest()", "def structural_hash(obj: object) -> bytes:\n hasher = hashlib.blake2b()\n if isinstance(obj, (int, str, float, PurePath)):\n hasher.update(bytes(\"P\" + str(obj), \"utf-8\"))\n elif dataclasses.is_dataclass(obj):\n fields = dataclasses.fields(obj)\n hasher.update(bytes(f\"O{len(fields)}\\x20\", \"utf-8\"))\n for field in sorted(fields, key=lambda x: x.name):\n if not field.metadata.get(\"nohash\"):\n hasher.update(bytes(f\"F{len(field.name)}\\x20{field.name}\", \"utf-8\"))\n hasher.update(structural_hash(getattr(obj, field.name)))\n elif isinstance(obj, (collections.abc.Sequence, collections.abc.Set)):\n hasher.update(bytes(f\"L{len(obj)}\\x20\", \"utf-8\"))\n for member in obj:\n child_hash = 
structural_hash(member)\n hasher.update(bytes(f\"E{len(child_hash)}\\x20\", \"utf-8\"))\n hasher.update(child_hash)\n elif isinstance(obj, collections.abc.Mapping):\n hasher.update(bytes(f\"M{len(obj)}\\x20\", \"utf-8\"))\n for key, member in obj.items():\n child_hash = structural_hash(member)\n hasher.update(\n bytes(f\"E{len(key)}\\x20{key}\\x20{len(child_hash)}\\x20\", \"utf-8\")\n )\n hasher.update(child_hash)\n elif isinstance(obj, enum.Enum):\n hasher.update(bytes(str(obj), \"utf-8\"))\n elif obj is None:\n hasher.update(b\"N\")\n else:\n raise TypeError(\"Unhashable type\", obj)\n\n return hasher.digest()", "def hash(self, oid):\n data = self.family_name + self.name +\\\n self.date_of_birth + self.date_of_issue +\\\n self.date_of_expiry + self.issuing_country +\\\n self.issuing_authority + self.license_number +\\\n \"\".join(self.categories_of_vehicles) +\\\n str(self.number_of_entries)\n if oid == 'id-sha1':\n digest = hashes.Hash(hashes.SHA1(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha224':\n digest = hashes.Hash(hashes.SHA224(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha256':\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha384':\n digest = hashes.Hash(hashes.SHA384(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha512':\n digest = hashes.Hash(hashes.SHA512(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n else:\n print('ERROR: Hash algorithm not implemented.')\n sys.exit(1)", "def toHashable(self) -> str:\r\n\r\n return self.toHashBase().encode('utf-8')", "def __hash__(self):\n hashable = tuple(self.pandas_object.values.tobytes())\n if isinstance(self.pandas_object, pd.DataFrame):\n hashable += tuple(self.pandas_object.columns)\n else:\n hashable += tuple(self.pandas_object.name)\n return hash(hashable)", "def __hash__(self):\n return self.to_hash()", "def hash(self) -> str:\r\n ...", "def hash(space, w_object):\n return space.hash(w_object)", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def object_hash(obj):\n try:\n code = obj.__code__.co_code\n except AttributeError:\n attrlist = [getattr(obj, name) for name in dir(obj)\n if not name.startswith('__')]\n codelist = [attr.__code__.co_code for attr in attrlist\n if hasattr(attr, '__code__')]\n code = b','.join(codelist)\n digest = hashlib.md5(code).hexdigest()\n return digest", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def __hash__(self):\n return hash(self.hash)", "def object_sha1(obj):\n\n return hashlib.sha1(json.dumps(obj).encode()).hexdigest()", "def hashkey(obj, salt=0):\n if isinstance(obj, str):\n return zlib.adler32(obj.encode(), salt) & 0xffffffff\n elif isinstance(obj, bytes):\n return zlib.adler32(obj, salt) & 0xffffffff\n elif isinstance(obj, datetime_type):\n return zlib.adler32(str(obj).encode(), salt) & 0xffffffff\n return hash(obj) & 0xffffffff", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return 
hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def _make_hashable(items):\n\n def convert(x):\n # Perform any conversions here to make a variable hashable\n if isinstance(x, np.ndarray):\n # Create an sha1 of the data, and throw in a string\n # and the shape.\n return ('__type_np.ndarray', x.shape,\n xxhash.xxh3_128_hexdigest(x))\n elif isinstance(x, (list, tuple)):\n return _make_hashable(x)\n elif isinstance(x, dict):\n return _make_hashable(sorted(x.items()))\n return x\n\n return tuple(map(convert, items))", "def get_hash(thing):\n n = hashlib.sha256()\n \n if isinstance(thing,str):\n n.update(thing.encode('utf-8' ))\n elif isinstance(thing, bytes):\n n.update(thing)\n elif isinstance(thing,BeautifulSoup):\n n.update(get_hash(str(thing)))\n else:\n raise RuntimeError(\"unknown type: {}\".format(str(type(thing))))\n \n return(n.digest())", "def create_hash(*args):\n challenge_str = jsonpickle.encode(args)\n challenge_hash = hashlib.sha256(challenge_str.encode())\n return Bn.from_binary(challenge_hash.digest())", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def do_hash(dat: typing.Any) -> str:\n return hashlib.sha1(json.dumps(dat, sort_keys=True).encode('utf-8')).hexdigest()", "def __hash__(self):\n raise NotImplementedError", "def __hash__(self):\n\n return hash((str(self.type) + str(self.value)))", "def __hash__(self):\r\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(self.joined())", "def __hash__(self):\n return self.value.__hash__()", "def update_hash(hasher, obj):\r\n hasher.update(str(type(obj)))\r\n if isinstance(obj, (tuple, list)):\r\n for e in obj:\r\n update_hash(hasher, e)\r\n elif isinstance(obj, dict):\r\n for k in sorted(obj):\r\n update_hash(hasher, k)\r\n update_hash(hasher, obj[k])\r\n else:\r\n hasher.update(repr(obj))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def hash(self):\n return self.__hash__()", "def _metatize_HashableNDArray(obj):\n return obj", "def coerce(self, value):\n if isinstance(value, bytes) and len(value) == self.bit_length:\n return HashString.from_b256(value)\n elif len(value) == self.b16_length:\n return HashString.from_b16(value)\n elif self.b64_length - len(value) <= 4:\n return HashString.from_b64(value)", "def hash(self):\n return hashlib.sha256(self.to_json().encode()).hexdigest()", "def __str__(self: Hash) -> str:\n return self.to_hex()", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def hash_key(self):", "def __hash__(self):\n return hash(str(self.__id__))", "def hash(self):\n return Hash.dhash(bytes(self))", "def __hash__(self):\n\t\treturn hash(repr(self))", "def __Hash(self):\n return self._Hash()", "def regist_hash(cobj, hash, handler, dir):\n pass", "def _makehash():\n return defaultdict(_makehash)", "def make_hash(attrs, usekeys=None, N=10):\n if usekeys is None:\n d = attrs\n else:\n d = {k: attrs.get(k, None) for k in usekeys}\n\n s = 
json.dumps(d, sort_keys=True, cls=JSONEncoder).encode()\n return hashlib.sha1(s).hexdigest()[:N]", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def get_hash(self):\n return self.__hash", "def hashable(cls):\n\n # py2\n assert \"__hash__\" in cls.__dict__\n # py3\n assert cls.__dict__[\"__hash__\"] is not None\n assert \"__eq__\" in cls.__dict__\n\n cls.__ne__ = lambda self, other: not self.__eq__(other)\n\n return cls", "def __hash__(self):\n hash_value = 0\n \n # required\n hash_value ^= self.required << 14\n \n # title\n hash_value ^= hash(self.title)\n \n # type\n hash_value ^= hash(self.type)\n \n # values\n values = self.values\n if (values is not None):\n hash_value ^= len(values)\n \n for value in values:\n hash_value ^= hash(value)\n \n return hash_value", "def md5(obj):\n import hashlib\n # print \"self.conf\", str(self.conf)\n # if type(obj) is not str:\n # obj = str(obj)\n # print('type(obj)', type(obj))\n m = hashlib.md5(obj.encode())\n return m", "def __hash__(self):\n obj = (self.make, self.model, self.year, self.mpg)\n return hash(obj)", "def __hash__( self ):\n return hash( self.data )", "def gen_model_hash(json_: str) -> str:\n # get hash of dict\n hash_ = hashlib.md5() # nosec\n encoded = json_.encode()\n hash_.update(encoded)\n return hash_.hexdigest()", "def geometry_hash(geometry):\n if hasattr(geometry, 'md5'):\n # for most of our trimesh objects\n md5 = geometry.md5()\n elif hasattr(geometry, 'tostring'):\n # for unwrapped ndarray objects\n md5 = str(hash(geometry.tostring()))\n\n if hasattr(geometry, 'visual'):\n # if visual properties are defined\n md5 += str(geometry.visual.crc())\n return md5", "def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)", "def __hash__(self):\n return super().__hash__()", "def hash(self):\n return self.hash_by_id(self.id)", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def __get_hashstr(_config_object: dict):\n hashobj = hashlib.md5()\n json_str = json.dumps(_config_object, sort_keys=True).encode('utf-8')\n hashobj.update(json_str)\n dig = hashobj.hexdigest()\n return dig\n # return hashobj.update(json.dumps(_config_object, sort_keys=True).encode('utf-8')).hexdigest()", "def hash(self):\n return hashlib.sha1(str(self._dict))", "def json_hash(obj: Any, encoder: type[json.JSONEncoder] | None = CoercingEncoder) -> str:\n json_str = json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True, cls=encoder)\n return hash_all([json_str])", "def __hash__(self):\n return hash((self.type, self.data))", "def _build_hash(target, meta_type):\n digest = hashlib.sha1(target.encode('ascii')).hexdigest() # nosec\n return meta_type + digest", "def __hash__(self):\n hash_value = 0\n \n # approximate_online_count\n hash_value ^= self.approximate_online_count\n \n # approximate_user_count\n hash_value ^= self.approximate_user_count << 12\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # discovery_splash\n hash_value ^= hash(self.discovery_splash)\n \n # emojis\n emojis = self.emojis\n hash_value ^= len(emojis) << 1\n for emoji in emojis.values():\n hash_value ^= hash(emoji)\n \n # features\n features = self.features\n hash_value ^= len(features) << 5\n for feature in features:\n hash_value ^= hash(feature)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # 
invite_splash\n hash_value ^= hash(self.invite_splash)\n \n # stickers\n stickers = self.stickers\n hash_value ^= len(stickers) << 9\n for sticker in stickers.values():\n hash_value ^= hash(sticker)\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self) -> int:\n return hash(self._hashable_content())", "def HashAlgorithm(self) -> _n_7_t_0:", "def get_hash(self):\n return freeze_dict(self.get_hash_params())", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def HashValue(self) -> _n_0_t_3[_n_0_t_9]:", "def hash(self):\n hash_properties = self.artifacts\n return hashlib.md5(','.join(hash_properties).encode()).hexdigest()", "def __hash__(self, reinit=False):\n if not self.hash_value is None and not reinit:\n return self.hash_value\n elif isinstance(self, Leaf):\n self.hash_value = Hash.leaf_hash(self)\n return self.hash_value\n else:\n self.hash_value = Hash.node_hash(self)\n return self.hash_value", "def __hash__(self):\n hash(self.components)", "def __hash__(self):\n # These entities are not cached, so we wont use their `id` if applicable.\n hash_value = 0\n \n # bot\n hash_value ^= hash(self.bot)\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self):\n return hash(self.get_canonical_identifier())", "def get_hash(self):\r\n return", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def __hash__(self):\n return self['id'].__hash__()", "def internal_hash(self):\r\n return _TripleCanonicalizer(self).to_hash()", "def __hash__(self) -> int:", "def hash_me(cls, p_str, p_len=64):\n v_hash = str()\n v_len = EC.SHA256 if p_len is None else EC.SHA256 if p_len not in EC.HASH_ALGO else p_len\n if v_len == EC.SHA512:\n v_hash = hashlib.sha512()\n elif v_len == EC.SHA256:\n v_hash = hashlib.sha256()\n elif v_len == EC.SHA224:\n v_hash = hashlib.sha224()\n elif v_len == EC.SHA1:\n v_hash = hashlib.sha1()\n\n v_hash.update(p_str.encode(\"utf-8\"))\n return v_hash.hexdigest()" ]
[ "0.7611977", "0.73950815", "0.72174555", "0.72113204", "0.71887934", "0.7181167", "0.7116037", "0.7108791", "0.701039", "0.70093346", "0.6961172", "0.691401", "0.6912288", "0.6907212", "0.6874116", "0.683543", "0.6820438", "0.67326635", "0.67163974", "0.6707556", "0.66794527", "0.66767013", "0.6674996", "0.66376996", "0.66369027", "0.656375", "0.65088356", "0.6507814", "0.6507814", "0.6507814", "0.6507814", "0.6470207", "0.6470207", "0.6470207", "0.6467132", "0.64610225", "0.6439882", "0.6371864", "0.6371864", "0.6371864", "0.6371864", "0.6352219", "0.63460547", "0.6332621", "0.63253045", "0.6314473", "0.63121927", "0.6305298", "0.6269669", "0.6269669", "0.6269669", "0.62541705", "0.6247881", "0.6247025", "0.6240863", "0.623623", "0.62223494", "0.6204156", "0.6203057", "0.6196013", "0.61951166", "0.619506", "0.61903185", "0.61842793", "0.6182926", "0.61792266", "0.6169246", "0.61674064", "0.61609584", "0.6156053", "0.6138256", "0.6136648", "0.61325234", "0.61266637", "0.61189604", "0.6109633", "0.6104466", "0.6092034", "0.6083087", "0.608049", "0.6078447", "0.6067243", "0.6065079", "0.60573566", "0.60572845", "0.60530126", "0.6045273", "0.6043789", "0.6039261", "0.6034819", "0.6026298", "0.60220784", "0.6021399", "0.6018024", "0.60143167", "0.59947956", "0.5994529", "0.5988433", "0.59880227", "0.5983778" ]
0.8125466
0
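A minimal usage sketch for the hashable helper above, assuming Python 3 and only the standard hashlib module; the sample object and the choice of SHA-256 are illustrative and not taken from the dataset:

import hashlib

def hashable(obj):
    # Encode the object's string form so hashlib can digest it.
    return bytes(str(obj), "utf-8")

# Any hashlib constructor accepts the resulting bytes.
digest = hashlib.sha256(hashable({"a": 1, "b": [2, 3]})).hexdigest()
print(digest)  # 64-character hex string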
Generate a hash from a dictionary
def hash_dict(dct):
    h = hashlib.md5()
    def update(d):
        for k, v in d.items():
            h.update(hashable(k))
            if isinstance(v, dict):
                update(v)
            else:
                h.update(hashable(v))
    update(dct)
    return h.digest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_hash(dictionary):\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()", "def dict_hash(dictionary) -> str:\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()", "def hash_dict(_dict):\n return hashlib.sha256(json.dumps(_dict).encode('utf-8')).hexdigest()", "def dict_hash(dictionary: Dict[str, Any]) -> str:\n d_hash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n d_hash.update(encoded)\n return d_hash.hexdigest()", "def hash_dictionary(dictionary: dict[str, Any]) -> str:\n dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')\n return hashlib.new( # nosec:B324\n 'sha1',\n dict_as_string,\n usedforsecurity=False\n ).hexdigest()", "def hash_from_dict(d):\r\n items = d.items()\r\n items.sort()\r\n first_part = [k for k, v in items]\r\n second_part = []\r\n for k, v in items:\r\n if isinstance(v, (tuple, list)):\r\n second_part += [tuple(v)]\r\n else:\r\n second_part += [v]\r\n tuple_items = tuple(first_part + second_part)\r\n return hash(tuple_items)", "def do_hash(dat: typing.Any) -> str:\n return hashlib.sha1(json.dumps(dat, sort_keys=True).encode('utf-8')).hexdigest()", "def GenerateHash(params):\n exp_params = params.ConvertToDict()\n return hashlib.sha1(\n repr(sorted(exp_params.items())).encode('utf-8')).hexdigest()", "def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()", "def hash(self):\n return hashlib.sha1(str(self._dict))", "def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def dict_hash(obj, start=''):\n h = hashlib.sha1(to_bytes(start))\n h.update(to_bytes(obj.__class__.__name__))\n if isinstance(obj, dict):\n for key, value in sorted(obj.items()):\n h.update(to_bytes(key))\n h.update(to_bytes(dict_hash(value)))\n elif isinstance(obj, (list, tuple)):\n for el in obj:\n h.update(to_bytes(dict_hash(el)))\n else:\n # basic types\n if isinstance(obj, bool):\n value = str(int(obj))\n elif isinstance(obj, (six.integer_types, float)):\n value = str(obj)\n elif isinstance(obj, (six.text_type, bytes)):\n value = obj\n elif obj is None:\n value = b''\n else:\n raise ValueError(\"Unsupported value type: %s\" % obj.__class__)\n h.update(to_bytes(value))\n return h.hexdigest()", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def hash(self) -> bytes:", "def gen_model_hash(json_: str) -> str:\n # get hash of dict\n hash_ = hashlib.md5() # nosec\n encoded = json_.encode()\n hash_.update(encoded)\n return hash_.hexdigest()", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def get_hash_for_flat_dictionary(data):\n # type: (Dict[str, Union[int, bool, six.text_type]]) -> six.text_type\n data = data or {}\n\n # NOTE: We use hash over hashlib 
since it's faster. Keep in mind that result of hash is Python\n # interpreter instance specific and is not stable acros the run. This is fine in our case where\n # we only store this hash in memory of a single process (and we never serialize / write it out\n # to disk or similar). With hash() function, there is also a larger chance of a collision, but\n # that's fine here.\n return six.text_type(hash(frozenset(data.items())))", "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def create_hash(*args):\n challenge_str = jsonpickle.encode(args)\n challenge_hash = hashlib.sha256(challenge_str.encode())\n return Bn.from_binary(challenge_hash.digest())", "def hash_function_1(key):\n hash = 0\n for i in key:\n hash = hash + ord(i)\n return hash", "def dict_to_hex(cls, d: Dict[str, Any]) -> str:\n md5 = hashlib.md5()\n keys = sorted(d.keys())\n for key in keys:\n value = d[key]\n if isinstance(value, dict):\n value = cls.dict_to_hex(value)\n else:\n value = hash('%s::%s' % (type(value), value))\n value = \"%s::%s\" % (key, value)\n md5.update(value.encode('utf-8'))\n return md5.hexdigest()", "def get_hashval(inputdict, skip=None):\n\n dict_withhash = {}\n dict_nofilename = OrderedDict()\n keys = {}\n for key in inputdict:\n if skip is not None and key in skip:\n continue\n keys[key.uri] = key\n for key in sorted(keys):\n val = inputdict[keys[key]]\n outname = key\n try:\n if isinstance(val, pm.URIRef):\n val = val.decode()\n except AttributeError:\n pass\n if isinstance(val, pm.QualifiedName):\n val = val.uri\n if isinstance(val, pm.Literal):\n val = val.value\n dict_nofilename[outname] = _get_sorteddict(val)\n dict_withhash[outname] = _get_sorteddict(val, True)\n sorted_dict = str(sorted(dict_nofilename.items()))\n return (dict_withhash, md5(sorted_dict.encode()).hexdigest())", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def calc_hash(self, record: dict) -> str:\n return sha1(orjson.dumps(record, option=orjson.OPT_SORT_KEYS)).hexdigest()", "def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())", "def _hash(self, key):\n if self.function == 'fnv':\n h = 2166136261\n for i in range(len(key)):\n h = (h * 16777619) ^ ord(key[i])\n return h\n elif self.function == 'add':\n h = 0\n for i in range(len(key)):\n h += ord(key[i])\n return h", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def make_hash(attrs, usekeys=None, N=10):\n if usekeys is None:\n d = attrs\n else:\n d = {k: attrs.get(k, None) for k in usekeys}\n\n s = json.dumps(d, sort_keys=True, cls=JSONEncoder).encode()\n return hashlib.sha1(s).hexdigest()[:N]", "def hasher(c):\n try:\n return hash(c)\n except TypeError:\n if isinstance(c, Mapping):\n subhash = []\n for k in sorted(c.keys()):\n subhash.append(hash(k))\n subhash.append(hasher(c[k]))\n return hash(tuple(subhash))\n elif isinstance(c, Iterable):\n return hash(tuple(hasher(item) for item in c))\n else:\n raise TypeError('cant figure out ' + 
repr(c))", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def _calculate_hash(self, entry):\n entry.pop('id', None)\n return hashlib.sha224(json.dumps(\n entry, cls=DjangoJSONEncoder).encode('utf-8')).hexdigest()", "def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()", "def hash(space, w_object):\n return space.hash(w_object)", "def hash(self) -> str:\r\n ...", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def freeze_dict(params):\n return hashlib.sha1(\"&\".join(\n \"{key}={value}\".format(\n key = key,\n value = value,\n )\n for key, value in sorted(six.iteritems(params))\n ).encode('utf-8')).hexdigest()", "def hash_function_2(key):\n hash = 0\n index = 0\n for i in key:\n hash = hash + (index + 1) * ord(i)\n index = index + 1\n return hash", "def get_results_hash(self, data):\n data = json.dumps(data, sort_keys=True)\n result = hashlib.sha512(data.encode())\n result_hash = result.hexdigest()\n return result_hash", "def hash_key(aMap, key):\n\treturn hash(key) % len(aMap) #(this will give numbers between 0 and 255)", "def hash(last_block):\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(last_block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def hash(self, oid):\n data = self.family_name + self.name +\\\n self.date_of_birth + self.date_of_issue +\\\n self.date_of_expiry + self.issuing_country +\\\n self.issuing_authority + self.license_number +\\\n \"\".join(self.categories_of_vehicles) +\\\n str(self.number_of_entries)\n if oid == 'id-sha1':\n digest = hashes.Hash(hashes.SHA1(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha224':\n digest = hashes.Hash(hashes.SHA224(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha256':\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n 
digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha384':\n digest = hashes.Hash(hashes.SHA384(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n elif oid == 'id-sha512':\n digest = hashes.Hash(hashes.SHA512(), backend=default_backend())\n digest.update(data.encode())\n return digest.finalize()\n else:\n print('ERROR: Hash algorithm not implemented.')\n sys.exit(1)", "def hash(block):\n # The dictionary MUST be ordered, or we can have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def get_hash(data, exclude: Iterable[str] = tuple()) -> str:\n if isinstance(data, dict):\n data = {k: v for k, v in data.items() if k not in exclude}\n data_str = srsly.json_dumps(data, sort_keys=True).encode(\"utf8\")\n return hashlib.md5(data_str).hexdigest()", "def _makehash():\n return defaultdict(_makehash)", "def hash_function_2(key):\n\n hash = 0\n index = 0\n for i in key:\n hash = hash + (index + 1) * ord(i)\n index = index + 1\n return hash", "def hash_function_2(key):\n\n hash = 0\n index = 0\n for i in key:\n hash = hash + (index + 1) * ord(i)\n index = index + 1\n return hash", "def hash_function_2(key):\n\n hash = 0\n index = 0\n for i in key:\n hash = hash + (index + 1) * ord(i)\n index = index + 1\n return hash", "def _calculate_link_hash(links):\n to_hash = ''.join(sorted(links.keys()))\n # Hashlib takes encoded Strings, not Unicode objects\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def hash_key(aMap,key):#\n\treturn hash(key)%len(aMap)", "def compareHash(query_hash, dict, r):\n retdict = []\n for hash in dict.keys():\n if hammingDist(query_hash, hash) <= r:\n retdict.append(hash)\n return retdict", "def get_report_hash(self, consolidated):\n jsonstr = json.dumps(consolidated, sort_keys=True)\n hashobj = hashlib.sha1(jsonstr)\n hexval = hashobj.hexdigest()\n return hexval", "def hash(block):\n\t\t#Make sure the Dictionnary is ordered to have consistent hashes\n\t\tblock_string = json.dumps(block, sort_keys=True).encode()\n\t\treturn hashlib.sha256(block_string).hexdigest()", "def __get_hashstr(_config_object: dict):\n hashobj = hashlib.md5()\n json_str = json.dumps(_config_object, sort_keys=True).encode('utf-8')\n hashobj.update(json_str)\n dig = hashobj.hexdigest()\n return dig\n # return hashobj.update(json.dumps(_config_object, sort_keys=True).encode('utf-8')).hexdigest()", "def fingerprint(keyed_data, digest_size=16):\n h = blake2b(digest_size=16)\n for key in sorted(keyed_data.keys()):\n val = keyed_data[key]\n s = json.dumps(val, sort_keys=True, cls=NpEncoder).encode()\n h.update(s)\n return h.hexdigest()", "def h_python(key, N):\n return hash(key) % N", "def hash(block):\n # hashes a block\n #we must make sure that the dictionary is ordered, or we will have inconsistent hashes\n block_string = json.dumps(block, sort_keys = True).encode()\n return hashlib.sha256(block_string).hexdigest()\n #pass", "def identifier_hash(identifier, sigfig=None):\n if sigfig is None:\n sigfig = id_sigfig\n\n # convert identifier to integers and order of magnitude\n as_int, multiplier = util.sigfig_int(identifier, sigfig)\n # make all scales positive\n if (multiplier < 0).any():\n multiplier += np.abs(multiplier.min())\n hashable = (as_int * (10 ** multiplier)).astype(np.int64)\n return hashlib.md5(hashable).hexdigest()", "def generate_hash(passwd):\n return hashlib.sha512(passwd.encode(\"utf-8\")).hexdigest()", "def checksum(**kwargs):\n\n # 
remove secretkey from kwargs, lookup if missing\n secretkey = kwargs.pop('secretkey', resolve_secretkey())\n\n # sort the args, and concatenate them\n param_string = ''.join([''.join([str(x), str(y)])\n for x, y in sorted(kwargs.items())])\n\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def _hash(self, key):\n # OPTIONAL STRETCH: You may replace the Python hash with DJB2 as a stretch goal\n # return hash(key)\n return self._hash_djb2(key)", "def compute_hash(self, key: int):\n return key % 42", "def hash_function_1(key: str) -> int:\n hash = 0\n for letter in key:\n hash += ord(letter)\n return hash", "def hashIt(*args):\n total = int(0)\n for x in args:\n if isinstance(x, dict):\n for k, key in enumerate(sorted(x.keys())):\n total += hashIt(k, key, x[key])\n elif isinstance(x, (list, tuple)):\n for k, value in enumerate(x):\n total += hashIt(k, value)\n else:\n try:\n thisHash = hash(x)\n except:\n try:\n thisHash = hash(pickle.dumps(x))\n except:\n thisHash = 0\n total += thisHash\n return hash(total)", "def hash_function(input_tuple):\n return hash(input_tuple)", "def hash(block):\n\n # Dictionary must be ordered, else hashes will be inconsistent\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hashcode(o):", "def _hash(data):\n return hashlib.sha512(data).hexdigest()", "def hashkey(obj, salt=0):\n if isinstance(obj, str):\n return zlib.adler32(obj.encode(), salt) & 0xffffffff\n elif isinstance(obj, bytes):\n return zlib.adler32(obj, salt) & 0xffffffff\n elif isinstance(obj, datetime_type):\n return zlib.adler32(str(obj).encode(), salt) & 0xffffffff\n return hash(obj) & 0xffffffff", "def hash(self):\n return Hash.dhash(bytes(self))", "def hash_key(self):", "def HashAlgorithm(self) -> _n_7_t_0:", "def hashWord(self, word):\n return self.func(word).hexdigest()", "def calc_statistics_hash(self) -> bytes:\n raise NotImplementedError()", "def hash(self, key):\n key = str(key)\n h = 0\n a = self.hash_base\n for i in range(len(str(key))):\n h = (h * a + ord(key[i])) % self.table_capacity\n return h", "def create_config_hash(config):\n value_str = \"\"\n for section in config.sections:\n for key in section.keys():\n value_str += str(config[section][key])\n value_hash = hashlib.md5(value_str.encode('utf-8')).hexdigest()\n\n return value_hash", "def hash_generator(self, value):\n hash_string = hashlib.sha256(bytes(value))\n return hash_string.hexdigest()", "def keyhash(string):\n return hashlib.sha1(string.encode('utf-8')).hexdigest()", "def seed_hash(*args):\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2**31)", "def _hash_function(self, key):\n h = 0\n a = 31\n table_size = self.size\n for i in range(len(key)):\n h = (h * a + ord(key[i])) % table_size\n return h", "def get_policy_hash(path_dict):\n\n if args.verbose:\n print path_dict\n\n policy_config = ''\n for k, v in path_dict.iteritems():\n policy_string = '{\"path\":{\"' + k + '\":' + str(v).replace(' ', '').replace(\"'\", \"\\\"\") + '}}'\n if policy_config == '':\n policy_config = str(policy_string)\n else:\n policy_config = policy_config + ', ' + str(policy_string)\n\n return {\"policy\": policy_config}", "def hash(self, searchkey):\n return searchkey % self.n", "def hash_args(self, args, secret=None):\n for a in args:\n if isinstance(args[a], list): args[a] = json.dumps(args[a])\n\n args_joined = ''\n for a in sorted(args.keys()):\n if isinstance(a, unicode):\n args_joined += 
a.encode('utf-8')\n else:\n args_joined += str(a)\n\n args_joined += '='\n\n if isinstance(args[a], unicode):\n args_joined += args[a].encode('utf-8')\n else:\n args_joined += str(args[a])\n\n hash = hashlib.md5(args_joined)\n\n if secret:\n hash.update(secret)\n elif self.api_secret:\n hash.update(self.api_secret)\n return hash.hexdigest()", "def hashcode(self) -> str:\n spreadsheet_spec_dict = self.to_dict()\n sorted_keys = dict(sorted(spreadsheet_spec_dict.items()))\n return md5(json.dumps(sorted_keys).encode(\"utf-8\")).hexdigest()", "def hash_double(d):\n # initialize table\n table = [\"-\"] * 19\n # consider each integer k in the input\n for k in d:\n # if k is already in the table this is a duplicate so move to next integer in the input\n # note this check for a duplicate is using the functionality of python rather than checking using a linear probe\n if k in table:\n continue\n # apply the hash function\n i = (6 * k + 3) % 19\n t = i\n # initialize count that checks whether linear probe has considered each bucket and is now full\n count = 0\n j = 0\n # while bucket is already filled\n s = 11 - (k % 11)\n while table[i] != '-':\n j += 1\n # move to next bucket\n i = (t + j*s) % 19\n # increment count\n count += 1\n\n # if table is full\n if count >= 18:\n # can return table as nothing further can be added\n break\n\n # Ensure table[i] is empty so k can be added here\n if table[i] == '-':\n table[i] = k\n\n # now each part of the input has been considered return the table\n return table", "def hash(x) -> int:\n pass", "def _hash_function(self, x):\n return hashlib.sha1(x).hexdigest()", "def _s_hash(fn, data: str):\n\n return fn(_b(data)).hexdigest()", "def deep_hash(obj):\n pass", "def hash(self, key):\n return self._hash_function(key) % self.size # Modular hashing", "def mkPerfHash(keys, Hash):\n f1, f2, G = generate_hash(keys, Hash)\n return lambda k: (G[f1(k)] + G[f2(k)]) % len(G)", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])" ]
[ "0.78616196", "0.77828234", "0.7741928", "0.7730784", "0.7210692", "0.7073175", "0.6808858", "0.68039656", "0.67542607", "0.6678936", "0.6636507", "0.6627023", "0.66170514", "0.6524575", "0.6503319", "0.6502774", "0.64773655", "0.64493966", "0.64259434", "0.6421355", "0.64005363", "0.639311", "0.6389152", "0.6380154", "0.6373621", "0.63601995", "0.635451", "0.6348349", "0.6342093", "0.633456", "0.6320462", "0.6320462", "0.6320462", "0.6320462", "0.6320462", "0.631132", "0.6296834", "0.6272674", "0.62663734", "0.62660587", "0.62660587", "0.6263562", "0.62306225", "0.6218274", "0.6211767", "0.6208638", "0.6206094", "0.6192517", "0.6192517", "0.6184732", "0.61818224", "0.61816627", "0.6178514", "0.6174509", "0.6174509", "0.6174509", "0.61672765", "0.6154206", "0.61522883", "0.6148275", "0.6145531", "0.6136312", "0.6132912", "0.61160266", "0.6115569", "0.61043763", "0.6103221", "0.61010563", "0.60998726", "0.6084789", "0.607307", "0.6062291", "0.60597306", "0.60513794", "0.6044335", "0.604067", "0.6038405", "0.6033933", "0.6032533", "0.60323626", "0.60125506", "0.60104173", "0.6004864", "0.59961355", "0.5987381", "0.5983532", "0.5981971", "0.5972515", "0.5962675", "0.5962584", "0.5961929", "0.5953544", "0.5945485", "0.59383607", "0.59247977", "0.59179264", "0.5915664", "0.5912343", "0.5907243", "0.5906716" ]
0.67284095
9
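A minimal runnable sketch expanding the hash_dict document above. The hashable() helper it calls is not defined in the record, so the version below assumes a simple helper that encodes any value as UTF-8 bytes via str(); the final print call is only an illustrative usage, not part of the original.

import hashlib

def hashable(value):
    # Assumed helper (not in the original record): encode any value to bytes.
    return str(value).encode("utf-8")

def hash_dict(dct):
    h = hashlib.md5()

    def update(d):
        # Walk the dictionary recursively, feeding keys and leaf values into the digest.
        for k, v in d.items():
            h.update(hashable(k))
            if isinstance(v, dict):
                update(v)
            else:
                h.update(hashable(v))

    update(dct)
    return h.digest()

# Usage: nested dictionaries are hashed recursively.
print(hash_dict({"a": 1, "b": {"c": 2}}).hex())

Note that, like the original document, this sketch does not sort keys, so two dictionaries built with different insertion orders can yield different digests; several of the negative examples above sidestep this by serializing with json.dumps(..., sort_keys=True) before hashing.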
Ensure row data is valid. This currently just checks that 2D arrays match the variable components.
def validate_row(row):
    subkeys = [INDEP, DEP]
    for subkey in subkeys:
        for k, v in row[subkey].items():
            if v is None:
                continue
            if np.ndim(v) > 1:
                assert np.ndim(v) == 2
                if 1 not in np.shape(v):
                    assert isinstance(k, variable.Variable)
                    assert k.components is not None
                    assert len(k.components) in np.shape(v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_data(self, row, col, value):\n\n return True", "def check_row(row):\n \n if len(row) != _ncols:\n raise ValueError(\"Row contains {0} columns, expected {1}!\\n\\n{2}\\n\".format(len(row), _ncols, row))", "def validate(self, row):\n raise NotImplementedError", "def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1", "def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))", "def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid", "def _validateRowCol(self, rows, cols, numRow, numCol, dvName):\n if rows is not None:\n rowArr = np.array(rows)\n if np.max(rowArr) > numRow:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numRow)\n + \" rows and index up to \"\n + str(np.max(rowArr))\n + \" was specified: \"\n + str(rows)\n )\n if np.min(rowArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Row index less than 1 specified: \"\n + str(rows)\n )\n if len(rows) != len(set(rows)):\n # duplicates\n raise Error(\"Duplicate indices specified in the rows of design variable \" + dvName + \": \" + str(rows))\n\n if cols is not None:\n colArr = np.array(cols)\n if np.max(colArr) > numCol:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"Design var has \"\n + str(numCol)\n + \" cols and index up to \"\n + str(np.max(colArr))\n + \" was specified: \"\n + str(cols)\n )\n if np.min(colArr) < 1:\n raise Error(\n \"Design variable \"\n + dvName\n + \" slice out of bounds. \"\n + \"col index less than 1 specified: \"\n + str(cols)\n )\n if len(cols) != len(set(cols)):\n # duplicates\n raise Error(\"Duplicate indices specified in the cols of design variable \" + dvName + \": \" + str(cols))", "def _validate_from_plain(data: Sequence[Sequence],\n columns: Sequence[str],\n dtypes: Sequence[str],\n row_wise: bool):\n\n if row_wise:\n # assert equal number of elements across rows\n row_lenghts = {len(row) for row in data}\n if len(row_lenghts) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"row. Please check provided input data\")\n\n # assert equal number of columns and elements per row\n row_lenghts.add(len(columns))\n if len(row_lenghts) > 1:\n raise ValueError(\n \"Number of columns has to equal the number of \"\n \"values per row. 
Please check column names and \"\n \"provided input data.\")\n\n # assert equal number of dtypes and elements per row\n row_lenghts.add(len(dtypes))\n if len(row_lenghts) > 1:\n raise ValueError(\"Number of dtypes has to equal the number of \"\n \"values per row. Please check dtypes and \"\n \"provided input data.\")\n\n else:\n # assert equal number of elements across columns\n col_lengths = {len(col) for col in data}\n if len(col_lengths) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"columns. Please check provided input data\")\n\n # assert equal number of columns in data, column names and dtypes\n col_count = len(columns)\n if col_count != len(data):\n raise ValueError(\"Input data and column names have different \"\n \"amount of columns. Please check provided \"\n \"input data\")\n\n if col_count != len(dtypes):\n raise ValueError(\"Input data and dtypes have different \"\n \"amount of columns. Please check provided \"\n \"input data\")", "def isValid(self):\n for ir in range(self.nRow): # Check rows for duplicates\n row = ir + 1\n vals = {}\n for ic in range(self.nCol):\n col = ic + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"doing row {row} at col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n \n for ic in range(self.nCol): # Check cols for duplicates\n col = ic + 1\n vals = {}\n for ir in range(self.nRow):\n row = ir + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"at row={row} doing col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n return True", "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def _verify_integrity(self):\n if len(self.data.shape) != 1:\n raise ValueError(\n \"Data array must be one dimensional \"\n \"(is {})\".format(len(self.data.shape))\n )\n\n if len(self.shape.shape) != 2:\n raise ValueError(\n \"Shape array must be two dimensional \"\n \"(is {})\".format(len(self.shape.shape))\n )\n\n shape_size, data_size = self._cumsum[-1], self.data.size\n\n if not shape_size == data_size:\n raise ValueError(\n \"Size of data ({data_size}) does not match that \"\n \"of the given shapes ({shape_size}).\".format(\n data_size=data_size, shape_size=shape_size\n )\n )", "def verify_grid_row_data(self, row_data):\n return self.verify_grid_row_details(self.vendors_div_id, row_data)", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", 
"def validate_data(self):\n if self.type == 'grid':\n for layout in self.data:\n grid = layout.get('grid')\n if not grid:\n raise ChartError(\n \"Layout grid setting must be set \"\n \"if layout type is 'grid'\")\n\n if not grid.get('location'):\n raise ChartError(\n \"Layout grid location must be set \"\n \"if layout type is 'grid'\")\n\n if len(grid['location']) != 2:\n raise ChartError(\"Layout grid location length must be 2\")", "def is_rows_valid(bd):\n for row in rows:\n seen = []\n for num in nums:\n if bd[row[num]] == \" \":\n continue\n elif bd[row[num]] not in seen:\n seen += [bd[row[num]]]\n else:\n return False\n else:\n continue\n return True", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def _check_data_valid(self):\n\n is_valid = (sum(~np.isnan(self.data).flatten()) > 0 and self.data.flatten().sum() != 0)\n if not is_valid:\n raise FITSException(f\"No data in {self.survey}\")", "def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False", "def is_valid(array, index):\n row, column = index\n return 0 <= row < len(array) and 0 <= column < len(array[row])", "def __is_valid_row(self, row_number):\n return self.__is_valid((row_number, 0))", "def validate_dataset(columns, rows):\n # Ensure that all column identifier are zero or greater, unique, and smaller\n # than the column counter (if given)\n col_ids = set()\n for col in columns:\n if col.identifier < 0:\n raise ValueError('negative column identifier \\'' + str(col.identifier) + '\\'')\n elif col.identifier in col_ids:\n raise ValueError('duplicate column identifier \\'' + str(col.identifier) + '\\'')\n col_ids.add(col.identifier)\n # Ensure that all row identifier are zero or greater, unique, smaller than\n # the row counter (if given), and contain exactly one value for each column\n row_ids = set()\n for row in rows:\n if len(row.values) != len(columns):\n raise ValueError('schema violation for row \\'' + str(row.identifier) + '\\'')\n elif row.identifier < 0:\n raise ValueError('negative row identifier \\'' + str(row.identifier) + '\\'')\n elif row.identifier in row_ids:\n raise ValueError('duplicate row identifier \\'' + str(row.identifier) + '\\'')\n row_ids.add(row.identifier)\n return max(col_ids) if len(col_ids) > 0 else -1, max(row_ids) if len(row_ids) > 0 else -1", "def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False", "def _validate_input_data(self):\n\n if type(self.data) in [np.ndarray, da.Array]:\n if not self.data.dtype.names:\n raise ValueError('QuadTree: numpy array provided for data, but no names were found, array must be a structured array')\n if 'x' not in self.data.dtype.names or 'y' not in self.data.dtype.names:\n raise ValueError('QuadTree: numpy structured array provided for data, but \"x\" or \"y\" not found in variable names')\n self.layernames = [self.rev_layer_lookup[var] for var in self.data.dtype.names if var in ['z', 'tvu']]\n elif type(self.data) == xr.Dataset:\n if 'x' not in 
self.data:\n raise ValueError('QuadTree: xarray Dataset provided for data, but \"x\" or \"y\" not found in variable names')\n if len(self.data.dims) > 1:\n raise ValueError('QuadTree: xarray Dataset provided for data, but found multiple dimensions, must be one dimensional: {}'.format(self.data.dims))\n self.layernames = [self.rev_layer_lookup[var] for var in self.data if var in ['z', 'tvu']]\n self._convert_dataset() # internally we just convert xarray dataset to numpy for ease of use\n else:\n raise ValueError('QuadTree: numpy structured array or dask array with \"x\" and \"y\" as variable must be provided')", "def validate_datasets(row):\n data_validator = DataJSONDataset(row)\n valid = data_validator.validate(validator_schema=row['validator_schema'])\n errors = data_validator.errors\n row['validation_errors'] = errors\n if not valid:\n logger.error(f'Error validating {row}: {errors}')", "def validate_dataset(self):\n pass", "def is_valid_row(self):\r\n return self.valid_row", "def _validate_level(self, levelText):\n if len([line for line in levelText.splitlines() if line.strip()]) != 6:\n # wrong num rows\n return False\n \n if any(len(list(line)) != 6 for line in levelText.splitlines() if line.strip()):\n # wrong num cols\n return False\n\n return True", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)", "def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))", "def _data_params_validation(self) -> None:\n extra_regressor_names = set(self.params._reqd_regressor_names)\n # univariate case\n if self.data.is_univariate():\n if len(extra_regressor_names) != 0:\n msg = (\n f\"Missing data for extra regressors: {self.params._reqd_regressor_names}! 
\"\n \"Please include the missing regressors in `data`.\"\n )\n raise ValueError(msg)\n # multivariate case\n else:\n value_cols = set(self.data.value.columns)\n if \"y\" not in value_cols:\n msg = \"`data` should contain a column called `y` representing the responsive value.\"\n raise ValueError(msg)\n if not extra_regressor_names.issubset(value_cols):\n msg = f\"`data` should contain all columns listed in {extra_regressor_names}.\"\n raise ValueError(msg)\n # validate cap\n if (self.params.cap is True) and (\"cap\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `cap` representing the cap when `cap = True`.\"\n _error_msg(msg)\n # validate floor\n if (self.params.floor is True) and (\"floor\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `floor` representing the floor when `floor = True`.\"\n _error_msg(msg)", "def __allowed_values_correct_matrix(self):\n strTestName = 'Values of a Numpy Array 2D (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy Array 2D')\n RxCSObject.paramAllowed('parameter1', range(int(2e3)))\n RxCSObject.parameter1 = np.random.randint(1, 1e3, (1e2, 1e1))\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_is_valid_row(self):\n dognames = student_submission.read_csv('./dognames.csv')\n\n self.assertTrue(student_submission.is_valid_row(dognames[999]),\n 'Your implementation seems wrong')\n self.assertTrue(student_submission.is_valid_row(dognames[999], year=2010),\n 'Your implementation seems wrong')\n self.assertTrue(student_submission.is_valid_row(dognames[999], sex='m'),\n 'Your implementation seems wrong')\n self.assertTrue(student_submission.is_valid_row(dognames[999], year=2010, sex='m'),\n 'Your implementation seems wrong')\n self.assertFalse(student_submission.is_valid_row(dognames[999], year=2006, sex='m'),\n 'Your implementation seems wrong')\n self.assertFalse(student_submission.is_valid_row(dognames[999], year=2010, sex='w'),\n 'Your implementation seems wrong')\n\n self.assertEqual(sum(student_submission.is_valid_row(dognames[i]) for i in range(len(dognames))), 6980,\n 'Your implementation seems wrong')\n\n self.assertEqual(sum(student_submission.is_valid_row(dognames[i], sex='w') for i in range(len(dognames))), 3549,\n 'Your implementation seems wrong')\n\n self.assertEqual(sum(student_submission.is_valid_row(dognames[i], year=2000) for i in range(len(dognames))), 118,\n 'Your implementation seems wrong')", "def _validate_array_params(array_params):\n if isinstance(array_params, dict):\n # Shallow check; make sure each antenna position is a 3-vector.\n if all(len(pos) == 3 for pos in array_params.values()):\n return True\n elif isinstance(array_params, str):\n # Shallow check; just make sure the file exists.\n return os.path.exists(array_params)\n else:\n raise TypeError(\"Array layout must be a dictionary or path to a layout csv.\")", "def is_valid(self, dataset):\n pass", "def valid_input(self, row, col):\n return ((row, col) not in self.marks and\n row <= WIDTH and row > 0 and\n col in COL_MAP)", "def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )", "def checkTrainData(cls, data):\n\n if data == None or len(data) == 0:\n raise Exception(\"No data\")\n\n if type(data[0]) != tuple:\n raise Exception(\"Not a list of tuples\")\n\n if len(data[0]) != 2 and type(data[0][0]) != str and type(data[0][1]) != list:\n raise Exception(\"Not a tuple of 
(String, [data])\")\n\n length = len(data[0][1])\n\n for tup in data:\n if len(tup) != 2 and type(tup[0]) != str and type(tup[1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n if len(tup[1]) != length:\n raise Exception(\"Not all elements have the same amount of data\")", "def enforce_2D(self,):\n for data in (self.data_obs,self.data_fcst):\n shp = data.shape\n if len(shp) == 2:\n pass \n elif len(shp) == 3:\n if shp[0] == 0:\n data = data[0,:,:]\n elif len(shp) == 4:\n if (shp[0] == 0) and (shp[1] == 0):\n data = data[0,0,:,:]\n else:\n raise FormatError(\"Data needs to be 2D.\")\n return", "def validate_dataset(self):\n if np.all(self.L_bpe == self.bpe_l):\n pass\n\n super(StandardDataset, self).validate_dataset()", "def valid(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Check for duplicate values in lines\n for line in range(9):\n seen = []\n for row in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in rows\n for row in range(9):\n seen = []\n for line in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in subgrids\n for (subgrid_line, subgrid_row) in [(subg_ln, subg_rw) for subg_ln in range(3) for subg_rw in range(3)]:\n seen = []\n for (line, row) in [(ln, rw) for ln in range(3) for rw in range(3)]:\n if self.grid[3*subgrid_line + line][3*subgrid_row + row] is None:\n pass\n elif self.grid[3*subgrid_line + line][3*subgrid_row + row] in seen:\n return False\n else:\n seen.append(self.grid[3*subgrid_line + line][3*subgrid_row + row])\n # No duplicates found\n return True", "def test_validate_2d(data, msg):\n if msg:\n with pytest.raises(ValueError, match=msg):\n _ = _validate_2d(data, 'test')\n else:\n assert data == _validate_2d(data, 'test')", "def is_valid(data):\n check = [0 for i in range(4)]\n # calculate how many ships are with different lengths\n for i in range(10):\n for j in range(10):\n if type(data[i][j]) == Ship:\n check[data[i][j]._Ship__length - 1] += 1\n # check ships\n for i in range(4):\n if check[i] != (i + 1) * (4 - i):\n return False\n # check corners\n for i in range(1, 10):\n for j in range(10):\n try:\n if type(data[i - 1][j + 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n try:\n if type(data[i - 1][j - 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n return True", "def check_meatadata_row(validated, input_validate_dict, row, idx):\n\n if row['RealCrystalName'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'RealCrystalName spaces or null', idx + 2)\n validated = False\n if row['crystal_name'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name spaces or null', idx + 2)\n validated = False\n if row['RealCrystalName'] not in row['crystal_name']:\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name does not contain RealCrystalName', idx + 2)\n validated = False\n if row['smiles'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Smiles null', idx + 2)\n validated = False\n\n return validated, input_validate_dict", "def test_row_from_columns_not_own_error_row_table(self):\n errors_on_separate_row = False\n field_setup = None\n error_names = 
None\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. But got {}.'.format(\n len(inputs), len(targets)))", "def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid", "def test_row_from_columns_not_own_error_row(self):\n errors_on_separate_row = False\n field_setup = None\n error_names = None\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def _validate_data(self, table_definition, data):\n if len(data) == 0:\n # Length zero columns get converted on write.\n return\n\n columns_checked = set()\n\n for column_name, column_definition in table_definition.c.items():\n if column_name in data:\n expected_type = self._expected_type(column_definition)\n is_nullable_numeric = (column_definition.nullable and\n expected_type in [int, float])\n if is_nullable_numeric:\n data[column_name] = data[column_name].fillna(value=np.nan)\n actual_type = data[column_name].dtype\n is_pandas_extension = isinstance(actual_type, ExtensionDtype)\n if expected_type is int:\n self._check_int_type(actual_type, column_name,\n is_pandas_extension, table_definition)\n elif expected_type is float:\n self._check_float_type(actual_type, column_name,\n table_definition)\n elif expected_type is str:\n self._check_str_type(actual_type, column_name, data,\n table_definition)\n else:\n raise RuntimeError(f\"Unexpected type from column \"\n f\"definitions: {expected_type}.\")\n elif not (column_definition.primary_key or\n column_definition.nullable):\n raise DismodFileError(f\"Missing column in data for table \"\n f\"'{table_definition.name}': \"\n f\"'{column_name}'\")\n columns_checked.add(column_name)\n\n extra_columns = set(data.columns).difference(table_definition.c.keys())\n if extra_columns:\n raise DismodFileError(f\"extra columns in data for table \"\n f\"'{table_definition.name}': {extra_columns}\"\n )", "def test_row_from_columns_has_errors(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n 
has_no_errors = setup[-1]\n for row in setup:\n if row == has_no_errors:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n else:\n self.assertGreater(len(row['expected']), 1)\n self.assertGreater(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def _validate_tiling(inst: Any, attr: Any, value: List[List[float]]) -> None:\n if len(value) == 0:\n raise ValueError(\"Tiling must have at least 1 row\")\n\n if any(len(t) == 0 for t in value):\n raise ValueError(\"Tiling must have at least 1 column\")\n\n if min(len(t) for t in value) != max(len(t) for t in value):\n raise ValueError(\"Tiling must have the same number of column for each row.\")", "def _check_data(self):\n # ================ CHECK DATA / CONNECT / SELECT ================\n N = self.xyz.shape[0]\n # Chech array :\n if (self.connect.shape != (N, N)) or not isinstance(self.connect,\n np.ndarray):\n raise ValueError(\"c_connect must be an array of \"\n \"shape \" + str((N, N)))\n if self.select is None:\n self.select = np.ones_like(self.connect)\n if (self.select.shape != (N, N) or not isinstance(self.select,\n np.ndarray)):\n raise ValueError(\"c_select must be an array of \"\n \"shape \" + str((N, N)))\n # Mask c_connect :\n try:\n self.connect.mask\n except:\n self.connect = np.ma.masked_array(self.connect, mask=True)\n self.connect.mask[self.select.nonzero()] = False\n # Use specific color values :\n if (self.colval is not None) and isinstance(self.colval, dict):\n mask = np.ones_like(self.connect.mask)\n for k, v in zip(self.colval.keys(), self.colval.values()):\n mask[self.connect.data == k] = False\n self.colval[k] = color2vb(v)\n self.connect.mask = mask\n\n # ================ CHECK COLOR ================\n # Check colorby :\n if self.colorby not in ['count', 'strength', 'density']:\n raise ValueError(\"The c_colorby parameter must be 'count', \"\n \"'strength' or 'density'\")\n # Test dynamic :\n if (self.dynamic is not None) and not isinstance(self.dynamic, tuple):\n raise ValueError(\"c_dynamic bust be a tuple\")\n\n # ================ NON-ZERO INDICES ================\n # Find where there is non-masked connections :\n self._nnz_x, self._nnz_y = np.where(~self.connect.mask)\n self._indices = np.c_[self._nnz_x, self._nnz_y].flatten()\n self._Nindices = np.arange(len(self._indices))\n # Build position array :\n self.a_position = np.zeros((2*len(self._nnz_x), 3), dtype=np.float32)\n self.a_position[self._Nindices, :] = self.xyz[self._indices, :]", "def is_valid_board(self):\n for (row, col), value in np.ndenumerate(self.final_values): # Iterate through each position\n if not self.__is_valid_value(row, col, value): # Check that the value is valid\n return False # An invalid (duplicate) value was found\n return True", "def validate_data(self, row, col, value):\n start = self.data(self.index(row, 1), QtCore.Qt.DisplayRole)\n stop = self.data(self.index(row, 2), QtCore.Qt.DisplayRole)\n step = self.data(self.index(row, 3), QtCore.Qt.DisplayRole)\n isstep = False\n if col == 1: # the start\n start = value\n elif col == 2: # the stop\n stop = value\n elif col == 3: # the step\n isstep = True\n step = value\n\n if np.abs(step) < 1e-12 or start == stop:\n return False\n if np.sign(stop - start) != 
np.sign(step):\n if isstep:\n self._data[row][2] = -stop\n else:\n self._data[row][3] = -step\n return True", "def validate_bed_format(row):\n assert len(row) >= 3, 'Bed Files must have at least 3 tab separated fields.'\n\n return True", "def test_validate_input_good_input(self):\r\n _validate_input(self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, \"Treatment\")", "def getValidRowsCols(self) :\n colns = number_of_good_cols(self.r_sheet)\n rowns = number_of_good_rows(self.r_sheet)\n \n # Check whether the number of good columns and rows are correct\n while self.isEmptyRow(rowns-1, colns) :\n rowns = rowns - 1 \n while self.isEmptyColumn(colns-1, rowns) :\n colns = colns - 1\n \n self.log.debug('Number of rows with content: {0}'.format(rowns))\n self.log.debug('Number of columns with content: {0}'.format(colns))\n return rowns, colns", "def _validate(matrix:[[int]], vector: [int]):\n if not vector:\n raise InvalidArgumentError(\"vector must me not empty list\")\n if not matrix:\n raise InvalidArgumentError(\"matrix must me not empty list\")\n if not all(isinstance(row, list) for row in matrix):\n raise InvalidArgumentError(f\"not all matrix rows are lists\")\n if not all(len(row)==len(matrix[0]) for row in matrix):\n raise InvalidArgumentError(f\"not all matrix rows are equal length\")", "def check_input_data(self, warning=False):\n for sheet, table in self.input_data.items():\n msg = (\n \"NaN values found in table:'{0}', column(s): {1}.\\n\"\n \"Empty cells are not allowed in a scenario to avoid \"\n \"unwanted behaviour.\\nRemove the whole column/row if \"\n \"a parameter is not needed (optional). Consider that 0, 'inf' \"\n \"or 1 might be neutral values to replace NaN values.\"\n )\n if isinstance(table, pd.DataFrame):\n table.dropna(thresh=1, inplace=True, axis=0)\n table.dropna(thresh=1, inplace=True, axis=1)\n if table.isnull().any().any():\n columns = tuple(table.loc[:, table.isnull().any()].columns)\n msg = msg.format(sheet, columns)\n if warning is True:\n warnings.warn(msg, UserWarning)\n else:\n raise ValueError(msg)\n self.input_data[sheet] = table.dropna(\n thresh=(len(table.columns))\n )\n else:\n if table.isnull().any():\n value = table.loc[table.isnull()].index\n msg = msg.format(sheet, value)\n if warning is True:\n warnings.warn(msg, UserWarning)\n else:\n raise ValueError(msg)\n\n if isinstance(self.input_data[\"volatile plants\"], pd.Series):\n self.input_data[\"volatile plants\"] = pd.DataFrame(\n self.input_data[\"volatile plants\"],\n columns=[self.input_data[\"volatile plants\"].name],\n )", "def is_legal(self):\n if not self._is_valid():\n return False\n\n if not self._check_all(self.get_rows()):\n return False\n\n if not self._check_all(self.get_cols()):\n return False\n\n if not self._check_all(self.get_blocks()):\n return False\n\n return True", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def _assert_2d(field):\n assert len(np.shape(field))==2, \"Variable fields must be a 2D array\"", "def test_row_from_columns_has_errors_table(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n has_no_errors = 
setup[-1]\n for row in setup:\n if row == has_no_errors:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n else:\n self.assertGreater(len(row['expected']), 1)\n self.assertGreater(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def is_cols_valid(bd):\n for col in cols:\n seen = []\n for num in nums:\n if bd[col[num]] == \" \":\n continue\n elif bd[col[num]] not in seen:\n seen += [bd[col[num]]]\n else:\n return False\n else:\n continue\n return True", "def _is_valid(self):\n for cell in self._cells_iterable():\n if cell not in self._valid_elements:\n return False\n return True", "def test_columns_not_in_raw_var(self):\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )", "def test_shapes(self):\n\n # Creates a raw layer\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.raw.var.drop(\"feature_is_filtered\", axis=1, inplace=True)\n self.validator.adata.X = examples.adata_non_raw.X.copy()\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n\n # remove one gene\n self.validator.adata = self.validator.adata[:, 1:]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Number of genes in X (3) is different than raw.X (4).\"],\n )", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def validate_data(values):\n try:\n [int(value) for value in values]\n if len(values) != 6:\n raise ValueError(\n f'Exactly 6 values are required - you provided {len(values)}'\n )\n except ValueError as e:\n print(f'Invalid data entered: {e}, please try again!\\n')\n return False\n\n return True", "def test_row_from_columns_no_errors(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def validate(self, field, row):\n raise NotImplementedError", "def valid(self):\r\n if self.file_exists and len(self.missing_columns) == 0 and len(self.veg_columns) > 0 and \\\r\n len(self.lat_errors) == 0 and len(self.lon_errors) == 0 and len(self.time_errors) == 0 and len(self.date_errors) == 0:\r\n return True\r\n else:\r\n return False", "def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k 
in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def do_checks(self):\n # ## get valid experiment variables\n all_subexperiments = [1, 2, 3]\n all_plates = list(range(1, 19))\n all_cell_ids = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n all_samples = list(self.experiment.design['Sample'])\n all_genes = self.experiment.subexperiments[1].plates[1].samples[all_samples[0]].genes\n all_replicates = list(range(1, 7))\n all_time = [0.5, 1.0, 2.0, 3.0, 4.0, 8.0, 12.0, 24.0, 48.0, 72.0, 96.0]\n\n if self.time is None:\n if self.treatment is 'Baseline':\n self.time = [0.0, 96.0]\n else:\n self.time = all_time\n\n if self.cell_id is None:\n self.cell_id = all_cell_ids\n\n if self.gene is None:\n self.gene = all_genes\n\n if self.replicate is None:\n self.replicate = all_replicates\n\n if self.treatment is None:\n raise ValueError('treatment cannot be None. Specify one of \"TGFb\", \"Control\", \"Baseline\"')\n\n if not isinstance(self.treatment, str):\n raise ValueError('treatment must be a string. Got \"{}\" a \"{}\"'.format(\n self.treatment, type(self.treatment)\n ))\n\n if not isinstance(self.normed, bool):\n raise ValueError('normed argument should be boolean. Got \"{}\"'.format(\n type(self.normed)\n ))\n\n if not isinstance(self.time, list):\n self.time = [self.time]\n\n for time_point in self.time:\n if time_point not in sorted(list(set(self.data.columns.get_level_values(1)))):\n raise ValueError('\"{}\" is invalid time point. 
Valid time '\n 'points are: {}'.format(\n time_point, list(self.data.columns))\n )", "def validate_csv(filename, header, cols, rows):\n\n # open file\n data = pd.read_csv(filename, delimiter='|')\n\n # validate header\n assert header == '|'.join(list(data.columns.values))\n\n # validate column count\n assert data.shape[1] == cols\n\n # validate row count\n assert data.shape[0] == rows\n\n # return (header_result == column_result == row_result) is True", "def valid_ray(self, row, col):\n # if row nor col is at an edge space, returns False\n if row != 0 and row != 9 and col != 0 and col != 9:\n return False\n # ensures no corner spaces have been selected\n if row == 0 or row == 9:\n if col > 8 or col < 1:\n return False\n if col == 0 or col == 9:\n if row > 8 or row < 1:\n return False\n return True", "def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")", "def is_valid_row(self, row):\n\n empty = (\"\", \"Vacant\")\n if not any(row.values()):\n return False\n if \"first name\" in row and \"last name\" in row:\n return row[\"last name\"] not in empty and row[\"first name\"] not in empty\n return row[\"name\"] not in empty", "def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])", "def valid(game_board, value, row, col):\n if len(value) > 1:\n value = \"X\"\n # Check row of new position\n for i in range(len(game_board[row])):\n if game_board[row][i] == value and i != col:\n return False\n\n # Check column of new position\n for i in range(len(game_board)):\n if game_board[i][col] == value and i != row:\n return False\n\n # Check the 3x3 square area\n start_row = 3 * (row // 3)\n start_col = 3 * (col // 3)\n for i in range(start_row, start_row+3):\n for j in range(start_col, start_col+3):\n if game_board[i][j] == value and i != row and j != col:\n return False\n\n return True", "def is_board_valid(bd):\n return is_rows_valid(bd) and is_cols_valid(bd) and is_sqrs_valid(bd)", "def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))", "def _check_inputvalues(self):\n # Check x, y and z are int or float dtypes\n # ie do not contain any unusable values like strings\n if not (self.x.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n if not (self.y.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (v.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")", "def check_guide_input(df):\n if df.shape[1] < 5:\n raise ValueError('Input has ' + str(df.shape[1]) + ' columns, should be > 4')\n if df.shape[0] == 0:\n raise ValueError('Input has no rows')\n if df.iloc[:, [0, 1]].drop_duplicates().shape[0] != df.shape[0]:\n raise ValueError('The first two columns of input (guide 1 and guide 2) should uniquely identify each row')", "def _validate_params(row: pd.Series):\n try:\n float(row[\"Scale\"])\n except ValueError:\n print(\n \"Invalid Scale for: \"\n + row[\"Path\"]\n + \" Scale: \"\n + row[\"Scale\"]\n + \" is not a number\"\n )\n try:\n int(row[\"Columns to Exclude\"])\n except ValueError:\n print(\n \"Invalid Scale for: \"\n + row[\"Path\"]\n + \" Ignoring: \"\n + row[\"Columns to 
Exclude\"]\n + \" is not a number\"\n )", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def _check_data(self, labels, fluxes, flux_uncertainties, wavelengths=None):\n\n fluxes = np.atleast_2d(fluxes)\n flux_uncertainties = np.atleast_2d(flux_uncertainties)\n\n if len(labels) != fluxes.shape[0]:\n raise ValueError(\"the fluxes should have shape (n_stars, n_pixels) \"\n \"where n_stars is the number of rows in the labels array\")\n\n if fluxes.shape != flux_uncertainties.shape:\n raise ValueError(\"the flux and flux uncertainties array should have\"\n \" the same shape\")\n\n if len(labels) == 0:\n raise ValueError(\"no stars (labels) given\")\n\n if wavelengths is not None:\n wavelengths = np.atleast_1d(wavelengths)\n if wavelengths.size != fluxes.shape[1]:\n raise ValueError(\"mis-match between number of wavelength values\"\n \" ({0}) and flux values ({1})\".format(\n wavelengths.size, fluxes.shape[1]))\n\n return None", "def test_row_from_columns_no_errors_table(self):\n errors_on_separate_row = True\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n for as_type in ('p', 'ul', 'fieldset'):\n setup = self.setup_row_from_columns(as_type, field_setup, error_names, errors_on_separate_row)\n for row in setup:\n self.assertEqual(len(row['expected']), 1)\n self.assertEqual(len(row['actual']), 1)\n self.assertEqual(row['expected'], row['actual'])", "def validate_data(self, y, x=None, verbose=True):\n # Check dimensions\n if not self.ODE_order:\n if not y.ndim == 2:\n raise ValueError(\"y-array is not 2 dimensional, if ODE and you didn't provide y then x is one dim\")\n\n if verbose and y.shape[0] < y.shape[1]:\n print(\"Warning: y-array has more series (columns) than samples (rows). 
Check if this is correct\")\n\n # Checks for x\n if self.ODE_order and x is None:\n assert False\n if not x is None:\n\n # Check dimensions\n if not x.ndim == 2:\n raise ValueError(\"x-array is not 2 dimensional\")\n\n # Check shape equality\n if x.shape[0] != y.shape[0]:\n raise ValueError(\"y-array and x-array have different number of samples (rows)\")", "def test_sanity_check (self):\n X, Y = self.dm.get_data(std=True, lag_indicator=True)\n\n # Ensure number of rows between what we expect.\n row_bound = (800, 1000)\n actual_rows = X.shape[0]\n msg = 'Number of rows not within expected bounds.'\n self.assertTrue(row_bound[0] < actual_rows < row_bound[1], msg)\n\n msg = 'X and Y have different number of rows.'\n self.assertEqual(X.shape[0], Y.shape[0], msg)\n\n # Ensure X columns match.\n expected_x_cols = ['SP500', 'ltc_px_std', 'xrp_px_std', 'xlm_px_std',\n 'eth_px_std', 'btc_px_std', 'ltc_volume_std',\n 'xrp_volume_std', 'xlm_volume_std', 'eth_volume_std',\n 'btc_volume_std', 'lagged_others']\n actual_x_cols = X.columns.tolist()\n msg = 'Number of X columns different than expected.'\n self.assertEqual(len(actual_x_cols), len(expected_x_cols), msg)\n\n for col in expected_x_cols:\n msg = 'Expected column not found: {}'.format(col)\n self.assertTrue(col in actual_x_cols, msg)", "def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two", "def _check_array(X):\n return check_array(X,\n accept_sparse=['csr', 'csc'], # Accept sparse csr, csc\n order=None, # Do not enforce C or Fortran\n copy=False, # Do not trigger copying\n force_all_finite=True, # Raise error on np.inf/np.nan\n ensure_2d=True, # Force 'X' do be a matrix\n allow_nd=True, # Allow 'X.ndim' > 2\n warn_on_dtype=False # Mute as 'dtype' is 'None'\n )", "def check_input_data(self, align=True):\n return self.check_data_list(self.inputs, align)", "def check_data(self):\n\n for i in range(len(self.full_ed_lines)):\n if self.full_ed_lines[i].text() != \"\":\n if self.full_ed_lines[i].hasAcceptableInput():\n continue\n else:\n if i == 1:\n self.msg2Statusbar.emit('Неправильный формат версии! Исправьте и повторите действие!')\n elif i == 5:\n self.msg2Statusbar.emit('Неправильная почта! Исправьте и повторите действие!')\n return False\n else:\n self.msg2Statusbar.emit('Не все поля заполнены! 
Исправьте и повторите действие!')\n return False\n return True", "def _check_variables_in_raw_data(self):\n # make sure that all of the necessary variables are present\n # or mapped via the variable dict\n for key in [key for key in self._map_cols.keys()\n if key not in ['block', 'choice_accuracy', 'ID']]:\n assert self._map_cols[key] in self._raw_data.columns,\\\n 'missing {} from raw data df columns'.format(\n self._map_cols[key])\n\n condition_codes = self._raw_data[self._map_cols['condition']].unique()\n for cond in ['go', 'stop']:\n assert self._map_codes[cond] in condition_codes,\\\n ('missing {} from column: '.format(self._map_codes[cond]),\n self._map_cols[\"condition\"])\n\n # check that all unique non-nan values in the accuracy column \n # can be mapped onto either correct or incorrect,\n # as defined by the values in the var_dict.\n if self._map_cols['choice_accuracy'] in self._raw_data.columns:\n raw_acc_codes = self._raw_data[\n self._map_cols['choice_accuracy']].unique()\n raw_acc_codes = [i for i in raw_acc_codes if i==i]\n map_acc_codes = [self._map_codes['correct'],\n self._map_codes['incorrect']]\n for acc_code in raw_acc_codes:\n assert acc_code in map_acc_codes,\\\n '{} present in {} column.'. format(\n acc_code, self._cols[\"choice_accuracy\"]\n )\n\n return True", "def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False" ]
[ "0.69890326", "0.6909129", "0.6653121", "0.6620799", "0.6608853", "0.6602894", "0.65480334", "0.6546582", "0.65440375", "0.6527276", "0.649017", "0.64542824", "0.6438385", "0.6417175", "0.6406108", "0.6311875", "0.6278405", "0.6260668", "0.6255064", "0.6238612", "0.62377685", "0.6222857", "0.62165743", "0.6214945", "0.62121654", "0.62018585", "0.6194205", "0.617039", "0.6169317", "0.61635673", "0.6148553", "0.6133628", "0.61328447", "0.61291105", "0.61154324", "0.6109701", "0.61053437", "0.6102318", "0.61007977", "0.6082521", "0.6080804", "0.6073975", "0.60611516", "0.6041628", "0.6039869", "0.6038084", "0.6036274", "0.60275894", "0.6027555", "0.6024553", "0.6023922", "0.6018299", "0.5989402", "0.5960549", "0.5928185", "0.5920356", "0.591823", "0.5910615", "0.5906891", "0.59040946", "0.5888268", "0.58875513", "0.58788335", "0.5878668", "0.5876142", "0.58640134", "0.5840238", "0.5834969", "0.5832812", "0.58272177", "0.5825917", "0.58201504", "0.58146816", "0.58076704", "0.58012426", "0.5799954", "0.5797661", "0.5797581", "0.579713", "0.57906175", "0.5786288", "0.5782985", "0.578095", "0.57797736", "0.57746196", "0.5771749", "0.57614666", "0.5760361", "0.5758058", "0.57564104", "0.5751646", "0.5751106", "0.57496965", "0.57467103", "0.57465994", "0.573486", "0.57304263", "0.57300013", "0.5719038", "0.571073" ]
0.7375959
0
Convert any size-1 arrays to scalars
def scalarise(dct):
    d = dct.copy()
    for subkey in [DEP, INDEP]:
        for k, v in d[subkey].items():
            if isinstance(v, np.ndarray) and np.size(v) == 1:
                dct[subkey][k] = v.item()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scalararray(inp) -> np.ndarray:\n return np.array([None, inp], dtype=object)[[1]].reshape([])", "async def infer_shape_array_to_scalar(track, ary):\n shp = await ary['shape']\n if shp == ():\n return NOSHAPE\n else:\n raise MyiaTypeError(\n 'array_to_scalar only works on 0d arrays',\n refs=[ary]\n )", "def numpy_scalar(data):\r\n\r\n # handle case where data is numpy.array([])\r\n if data.ndim > 0 and (len(data.shape) == 0 or\r\n __builtins__['max'](data.shape) == 0):\r\n assert numpy.all(numpy.array([]) == data)\r\n raise EmptyConstantError()\r\n try:\r\n numpy.complex(data) # works for all numeric scalars\r\n return data\r\n except Exception:\r\n raise NotScalarConstantError(\r\n 'v.data is non-numeric, non-scalar, or has more than one'\r\n ' unique value', data)", "def is_scalar(x):\n return x.ndim == 0", "def atleast_1d(*arys):\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_1d')\n if a.ndim == 0:\n a = a.reshape(1)\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res", "def to_scalar(obj):\n if isinstance(obj, np.generic):\n return obj.item()\n else:\n return obj", "def is_scalar(x: Any) -> bool:\r\n return np.isscalar(x) or (isinstance(x, np.ndarray) and x.ndim == 0)", "def atleast_1d(*arrays):\n if len(arrays) == 1:\n a = arrays[0]\n if isscalar(a):\n a = add_axes(a, 1)\n return a\n else:\n assert len(arrays) > 1\n return [atleast_1d(a) for a in arrays]", "def test_cast_array(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1.0, 2.0]))", "def values(value, shape, dtype=K.floatx()):\n return value * ones(shape, dtype) # or zeros() + ?", "def _from_sequence(cls, scalars, dtype=None, copy=None):\n if isinstance(scalars, FletcherArray):\n return scalars\n if dtype and isinstance(dtype, FletcherDtype):\n dtype = dtype.arrow_dtype\n return cls(pa.array(scalars, type=dtype, from_pandas=True))", "def one_dim(a: cython.double[:]):\n a[0] *= 2\n return a[0], a.ndim", "def scalar(name=None, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n type = CudaNdarrayType(dtype=dtype, broadcastable=())\r\n return type(name)", "def numeric_normalize_types(*args):\n dtype = np.result_type(*[a.dtype for a in args])\n return [a.astype(dtype) for a in args]", "def _is_scalar(shape):\n return F.shape_mul(shape) == 1", "def solution(array):\n array1 = np.array(array)\n array2 = array1.astype(float)\n return array2", "async def infer_shape_scalar_to_array(track, x):\n return ()", "def test_cast_array(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5)\n assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1, 2]))", "def convert_scalar(self, v, t):\n return v.asnumpy().item()", "def is_np_scalar(x):\n return isinstance(x, np.generic)", "def numarray(a: list) -> list[float]:\n return [float(aa) for aa in a]", "def _as_scalar(res, dtype=None):\r\n if dtype is None:\r\n dtype = config.floatX\r\n if numpy.all(res.type.broadcastable):\r\n while res.owner and isinstance(res.owner.op, T.DimShuffle):\r\n res = res.owner.inputs[0]\r\n # may still have some number of True's\r\n if res.type.broadcastable:\r\n rval = res.dimshuffle()\r\n else:\r\n rval = res\r\n if rval.type.dtype[:3] in ('int', 'uin'):\r\n # We check that the upcast of res and dtype won't change dtype.\r\n # If dtype is float64, we will cast int64 to float64.\r\n # This is valid when res is a scalar used as input to a dot22\r\n # as the cast of the scalar can be done before or after 
the dot22\r\n # and this will give the same result.\r\n if theano.scalar.upcast(res.dtype, dtype) == dtype:\r\n return T.cast(rval, dtype)\r\n else:\r\n return None\r\n\r\n return rval", "def convertToPrimitiveArray(objectArray: typing.List[typing.Any]) -> typing.Any:\n ...", "def to_scalar(var):\n return var.view(-1).data.tolist()[0]", "def to_scalar(self, v):\n raise NotImplementedError('to_scalar')", "def standardize_single_array(x, expected_shape=None):\n if x is None:\n return None\n\n if is_composite_or_composite_value(x):\n return x\n\n if isinstance(x, int):\n raise ValueError(\n 'Expected an array data type but received an integer: {}'.format(x))\n\n if (x.shape is not None and len(x.shape) == 1 and\n (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tf_type(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x", "def standardize_single_array(x):\n if x is None:\n return None\n if tensor_util.is_tensor(x):\n x_shape_ndims = array_ops.rank(x)\n else:\n x_shape_ndims = len(x.shape)\n\n if (x_shape_ndims == 1 and (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tensor(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x", "def is_scalar(obj: _std_typing.Any) -> bool:\n return obj.ndim == 0", "def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def denormalise_0_1(value_or_array, array_min, array_max):\n if isinstance(value_or_array, list):\n raise ValueError('this function accepts arraylike data, not a list. '\n 'Please check data or convert list to numpy array')\n elif isinstance(value_or_array, float):\n denormalised = value_or_array * (array_max - array_min) + array_min\n elif isinstance(value_or_array, np.ndarray):\n denormalised = value_or_array * (array_max - array_min) + array_min\n elif isinstance(value_or_array, pd.Series):\n denormalised = value_or_array * (array_max - array_min) + array_min\n else:\n sys.stdout.write(\"Unknown datatype. 
denormalise_0_1 has been given an input that does not appear to be \"\n \"an int, float, np.ndarray or pandas Series\\n\"\n \"Attempting to process as if it is arraylike.....\")\n return denormalised", "def _make_array(x):\n try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x", "def scalarDecorator(func):\n @wraps(func)\n def scalar_wrapper(*args,**kwargs):\n if numpy.array(args[0]).shape == ():\n scalarOut= True\n newargs= ()\n for ii in range(len(args)):\n if ii == 0:\n newargs= newargs+(numpy.array([args[ii]]),)\n else:\n newargs= newargs+(args[ii],)\n args= newargs\n else:\n scalarOut= False\n result= func(*args,**kwargs)\n if scalarOut:\n return result[0]\n else:\n return result\n return scalar_wrapper", "def forward(self, scalars):\n if self.num_scalars > 0:\n batch_size = scalars.shape[1]\n # sum over all nodes to ensure permutation invariance\n scalars = scalars.sum(2).permute(1, 2, 3, 0)\n # put the complex dimension at the end and collapse into one dimension of scalars\n scalars = scalars.contiguous().view((batch_size, -1))\n # apply linear mixing to scalars in each event\n predict = self.lin(scalars)\n else:\n predict = scalars\n\n return predict", "def _as_1d_ints(arr, n=None, xp=None):\n if xp is None:\n xp, on_gpu = get_array_module(arr)\n arr = xp.atleast_1d(xp.squeeze(arr))\n if arr.ndim > 1:\n raise ValueError(\"arr must be scalar or 1d\")\n if not issubclass(arr.dtype.type, np.integer):\n # float only OK if values are integers\n if not xp.all(xp.mod(arr, 1) == 0):\n raise ValueError(\"arr contains non-integer values\")\n if n is not None:\n if arr.size != n:\n if arr.size == 1:\n arr = xp.asarray([arr[0]] * n)\n else:\n raise ValueError(\n \"array did not have the expected size of {}\".format(n)\n )\n return arr.astype(np.intp) # case to ints", "def _scalar_vectorized(scalar, M):\n return scalar[:, np.newaxis, np.newaxis]*M", "def _from_sequence(cls, scalars):\n return cls(xnd.xnd(list(scalars)))", "def tensor2scalar(x):\n if isinstance(x, float):\n return x\n return x.cpu().detach().item()", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def floatX(arr):\n return np.asarray(arr, dtype=theano.config.floatX)", "def PLCTYPE_ARR_REAL(n: int) -> Type[Array]:\n return c_float * n", "def normalize_value(value: Any) -> Optional[Union[np.ndarray, List[Any], Tuple[Any]]]:\n if value is None:\n # Exclude None from falling through to blanket np.asarray conversion.\n return value\n\n if isinstance(value, (list, tuple, dict)):\n return value\n\n array = np.asarray(value)\n # TODO(#5359): Move into the function abi.\n if isinstance(value, (bool, int, float)):\n # Manually convert ints and floats to 32 bits.\n if array.dtype == np.float64:\n array = array.astype(np.float32)\n elif array.dtype == np.int64:\n array = array.astype(np.int32)\n\n return array", "def convert_array(self, v, t): # pragma: no cover\n return relay.const(tvm.ndarray.array(v, self.context))", "def scalar_mul(x, s, pub):\n x_shape = x.shape\n x_flatten = np.flatten(x)\n s_array = np.array([s for _ in range(len(x_flatten))])\n \n res = paillier_gpu.mul_impl(x_flatten, s_array)\n\n return np.reshape(res, x_shape)", "def is_scalar(self):", "def _from_sequence(cls, scalars):\n return 
cls(pa.array(scalars))", "def real(x):\n return x[..., 0]", "def retrun_1(x):\n ret = np.ones(len(x))\n return ret", "def arrayobj1d(inp: Iterable, copy=False) -> np.ndarray:\n return np.array([None] + list(inp), dtype=object, copy=copy)[1:]", "def square(array: ndarray) -> ndarray:\n return array * array", "def from_2D_to_1D(constant):\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant", "def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]", "def numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.detach().cpu().numpy()\n elif isinstance(x, tf.Tensor):\n x = x.numpy()\n return x.astype(np.float64)", "def to_ndarray(item):\n \n return type(item), sp.array(item, sp.float64, ndmin=1)", "def _ones_like(x):\n # Should only be used for small vectors.\n if x.get_shape().is_fully_defined():\n return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)\n return array_ops.ones_like(x)", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def scalar_mult(v, u):\n return [v[i] * u[i] for i in range(len(v))]", "def scalarMultiply(a, cols, x):\r\n\tr = len(a) * [None]\r\n\tfor i in range(len(a)):\r\n\t\tr[i] = cols * [None]\r\n\t\tfor j in range(cols):\r\n\t\t\tr[i][j] = a[i][j] * x\r\n\treturn r", "def normalize_array(var):\n if np.issubdtype(var.dtype, 'S1'):\n if var.dtype == str:\n # Python 2 on netCDF4 'string' variables needs this.\n # Python 3 returns false for np.issubdtype(var.dtype, 'S1')\n return var[:]\n\n def decoder(x):\n return str(x.decode('utf-8'))\n vfunc = np.vectorize(decoder)\n return vfunc(nc4.chartostring(var[:]))\n else:\n return var[:]", "def convert_scalar(self, v, t):\n return relay_from_scalar(v, type_to_np_dtype(t))", "def scalar_multiply(c, v):\n\treturn [c * v_i for v_i in v]", "def to_numpy(x):\r\n return x.squeeze().detach().cpu().numpy()", "def _is_scalar_from_shape(shape):\n return _logical_equal(_ndims_from_shape(shape), 0)", "def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))", "def normalize_input(inputs: [float]) -> [float]:", "def convert_array(self, v, t):\n return v.asnumpy()", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def _vec(A):\n N, m, n = A.shape\n return A.reshape((N, m*n, 1), order='F')", "def x(self) -> np.ndarray:\n return self.array[:, 1] if self.scalar_vector else self.array[:, 0]", "def _asarray(source, size):\n noise = source()\n if size is None:\n return noise.next()\n #count = reduce(operator.mul, shape)\n return numpy.asarray([noise.next() for _ in range(size)])", "def _image_to_vector(image):\n return 
image.flatten().astype(float)", "def asum (a, dimension=None,keepdims=0):\r\n if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:\r\n a = a.astype(N.float_)\r\n if dimension == None:\r\n s = N.sum(N.ravel(a))\r\n elif type(dimension) in [IntType,FloatType]:\r\n s = N.add.reduce(a, dimension)\r\n if keepdims == 1:\r\n shp = list(a.shape)\r\n shp[dimension] = 1\r\n s = N.reshape(s,shp)\r\n else: # must be a SEQUENCE of dims to sum over\r\n dims = list(dimension)\r\n dims.sort()\r\n dims.reverse()\r\n s = a *1.0\r\n for dim in dims:\r\n s = N.add.reduce(s,dim)\r\n if keepdims == 1:\r\n shp = list(a.shape)\r\n for dim in dims:\r\n shp[dim] = 1\r\n s = N.reshape(s,shp)\r\n return s", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def p2c(v: Union[Tuple, List, np.ndarray]) -> np.ndarray:\n if not isinstance(v, np.ndarray):\n v = np.array(v)\n if len(v.shape) == 1:\n v = np.array([v])\n return (np.array([np.cos(v[:, 1]), np.sin(v[:, 1])]) * v[:, 0]).T", "def ones(shape, dtype=None):\r\n if not isinstance(shape, (list, tuple, TensorVariable)):\r\n shape = [shape]\r\n if dtype is None:\r\n dtype = config.floatX\r\n return alloc(numpy.array(1, dtype=dtype), *shape)", "def scalar_vector_mult(alpha, v):\n return [alpha*x for x in v]", "def from_homogeneous(x):\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n if len(x.shape) == 1:\n h = x[-1]\n if h != 0:\n x = x/h\n return x[0:-1]\n else:\n return None\n else:\n assert len(x.shape) == 2\n h = np.expand_dims(x[:, -1], axis=1)\n x = x / h\n return x[:, 0:-1]", "def cvt_points_to_vector(points):\n\n return numpy.hstack(points)", "def scalar_multiply(s: float, v: Vector) -> Vector:\n return [s * v_item for v_item in v]", "def __call__(self, *args):\n if isinstance(args[0], (float, int, complex)):\n # scalar version:\n # (operator.isNumberType(args[0]) cannot be used as it is\n # true also for numpy arrays\n return self.constant\n else: # assume numpy array\n if self._array_shape is None:\n self._set_array_shape()\n else:\n r = self.constant*ones(self._array_shape, 'd')\n # could store r (allocated once) and just return reference\n return r", "def itkSingleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def ones(shape, dtype=None):\n\n return full(shape, 1, dtype)", "def __call__(self,x):\n arr = np.array(x,copy=False,dtype=float)\n\n if len(arr.shape) > 1:\n subshape = arr.shape[1:]\n elif len(arr.shape) == 1:\n subshape = tuple()\n else:\n raise ModelTypeError('Scalar cannot be fed into 2D model')\n\n try:\n arr = arr.reshape(2,np.prod(subshape))\n except ValueError:\n raise ModelTypeError('2D model input must have first dimension of length 2')\n\n return self._filterfunc(arr,*self.parvals).reshape(subshape)", "def mult_vect_by_scalar(v, a):\n return [a * i for i in v]", "def _asfarray(x):\n if hasattr(x, \"dtype\") and x.dtype.char in numpy.typecodes[\"AllFloat\"]:\n # 'dtype' attribute does not ensure that the\n # object is an ndarray (e.g. 
Series class\n # from the pandas library)\n if x.dtype == numpy.half:\n # no half-precision routines, so convert to single precision\n return numpy.asarray(x, dtype=numpy.float32)\n return numpy.asarray(x, dtype=x.dtype)\n else:\n # We cannot use asfarray directly because it converts sequences of\n # complex to sequence of real\n ret = numpy.asarray(x)\n if ret.dtype == numpy.half:\n return numpy.asarray(ret, dtype=numpy.float32)\n elif ret.dtype.char not in numpy.typecodes[\"AllFloat\"]:\n return numpy.asfarray(x)\n return ret", "def convert_raw_arrays(x, f):\n try:\n # Tensor, TensorNetwork...\n x = x.copy()\n x.apply_to_arrays(f)\n return x\n except AttributeError:\n pass\n\n try:\n # raw structured arrays that provide the {get|set}_params interface\n x = x.copy()\n x.set_params(tree_map(f, x.get_params()))\n return x\n except AttributeError:\n pass\n\n # other raw arrays\n return f(x)", "def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()", "def convert_scalar(self, v, t):\n return relay.const(v, type_to_np_dtype(t))", "def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())", "def test_normalization_scalar(features: List[List[float]]) -> List[List[float]]:\n normalized_features = []\n for feature in features:\n sum_squares = 0\n for i in feature:\n sum_squares += i * i\n sum_squares_root = np.sqrt(sum_squares)\n if sum_squares == 0:\n normalized_features.append(feature)\n else:\n normalized_features.append([x / sum_squares_root for x in feature])\n return normalized_features", "def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max", "def asarray(val, dtype=np.float64):\n # val is a list, tuple etc\n if not np.isscalar(val) and np.ndim(val) > 0:\n np_val = np.asarray(val, dtype=dtype)\n else:\n # val is a scalar number\n np_val = np.asarray([val], dtype=dtype)\n\n return np_val", "def arg2array(arg):\n if isinstance(arg, (matrix, ndarray)):\n s = arg.shape\n if len(s) == 1:\n return array(arg)\n if min(s) == 1:\n return array(arg).flatten()\n \n elif isinstance(arg, list):\n return array(arg)\n \n elif isinstance(arg, (int, float, float32, float64)):\n return array([arg])\n \n raise ValueError", "def is_scalar(val,\n include_np: bool = True,\n include_torch: bool = True) -> bool:\n if isinstance(val, numbers.Number):\n return True\n elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:\n return True\n elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1:\n return True\n else:\n return False", "def test_convert_scalar():\n foo = Value(scalars=1.2)\n assert foo.scalars[0].value == 1.2" ]
[ "0.6902022", "0.63372874", "0.6325084", "0.62974435", "0.62791", "0.62652755", "0.62318766", "0.62222445", "0.609567", "0.59744406", "0.59497917", "0.5910324", "0.5909021", "0.5878803", "0.58734983", "0.5865044", "0.586246", "0.5857849", "0.58351564", "0.5832479", "0.58043796", "0.580284", "0.5801109", "0.5776383", "0.57740146", "0.577219", "0.5770691", "0.57676727", "0.57523483", "0.5703288", "0.56874925", "0.56834304", "0.563522", "0.5623021", "0.5609272", "0.5596976", "0.5593237", "0.5585426", "0.5585426", "0.5585426", "0.5585426", "0.5574649", "0.5570874", "0.55455756", "0.55260533", "0.55182886", "0.5513339", "0.55058163", "0.54935604", "0.54746586", "0.54633194", "0.54437745", "0.54393053", "0.54295534", "0.54289997", "0.5423027", "0.54103535", "0.5408907", "0.5408907", "0.5408063", "0.5400596", "0.5396992", "0.53909194", "0.53892434", "0.5385846", "0.5384702", "0.53708786", "0.5368842", "0.53636986", "0.5348189", "0.5348189", "0.53428996", "0.5336788", "0.5311412", "0.5310146", "0.53095603", "0.53002185", "0.53002185", "0.5299198", "0.5295668", "0.5292181", "0.52848583", "0.528433", "0.52820885", "0.5275776", "0.5272058", "0.5271243", "0.52694345", "0.5269262", "0.5267551", "0.526187", "0.526014", "0.52541745", "0.5253408", "0.5251334", "0.52436036", "0.5235466", "0.52345645", "0.5232983", "0.52263165" ]
0.5335528
73
Add a new entry
def add(
    self,
    indep,
    key=None,
    value=None,
    dep=None,
    keys=None,
    values=None,
    **kwargs
):
    if key is not None and value is not None:
        if isinstance(key, str):
            self.add_value(indep, key, value)
        elif isinstance(key, list):
            self.add_array(indep, key, value)
    elif keys is not None and values is not None:
        self.add_array(indep, keys, values)
    elif dep is not None:
        self.add_dict(indep, dep)
    elif isinstance(key, dict):
        self.add_dict(indep, key)
    elif len(kwargs) > 0:
        self.add_dict(indep, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_new_entry(self):\n clear_screen()\n new_entry = Entry.create()\n if new_entry is None:\n print(\"Add new entry cancelled. Returning to main menu...\")\n time.sleep(1)\n return None\n self.entries.append(new_entry)\n with open(self.file_name, \"a\") as file:\n writer = csv.writer(file)\n writer.writerow([new_entry.date, new_entry.name, new_entry.minutes, new_entry.note])", "def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry", "def add_entry(self, entry):\n if self.get_entry(entry):\n return entry\n\n keys, values = [], []\n for i in entry:\n keys.append(\"'{}'\".format(i))\n if not isinstance(entry[i], str):\n values.append(\"'{}'\".format(str(entry[i])))\n else:\n values.append(\"'{}'\".format(entry[i]))\n\n keys.append(\"'hash'\")\n values.append(\"'{}'\".format(self._calculate_hash(entry)))\n sql = 'INSERT INTO {t_id} ({keys}) VALUES ({values})'.format(\n t_id=self.table_id, keys=','.join(keys), values=','.join(values))\n self.fusiontables.query().sql(sql=sql).execute()", "def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))", "def add_entry(self, entry: str) -> None:\n self.entries.append(f\"{self.count}: {entry}\")\n self.count += 1", "def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError", "def add_entry(unique_ID,value,label):\n\t\ttry:\n\t\t\tdata[unique_ID].appendEntry(value,label)\n\t\texcept InvalidInput:\n\t\t\t#deal with bad input\n\t\t\tpass", "def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? 
Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def add_entry(self, entry):\n self.precomposed_entries.append(entry)", "def add_new_entry(self, ent):\n ent.inserted = time.strftime(\"%D\")\n ent = self.add_entry(ent)\n if ent is not None:\n self.modified_collection = True\n return ent", "async def add_entry(self, **values):\r\n query = \"INSERT OR IGNORE INTO {table_name} ({table_headers}) VALUES({entry_values})\"\r\n\r\n headers = \", \".join([e for e in values.keys()])\r\n entry_val = \", \".join(\"?\"*len(values.values()))\r\n attrs = [e for e in values.values()]\r\n\r\n query = query.format(table_name = self.name, table_headers=headers, entry_values=entry_val)\r\n\r\n await self.data.db.execute(query, attrs)\r\n await self.data.db.commit()", "def add_entry_to_bibtex_db(self, ent):\n\n # add additional fields manually to the dict\n ent.consolidate_dict()\n self.bibtex_db.entries.append(ent.raw_dict)\n # the following updates the entries dict\n # self.bibtex_db.get_entry_dict()\n # # make sure it's there\n # if ent.ID not in self.bibtex_db.entries_dict:\n # self.bibtex_db.entries_dict[ent.ID] = ent.raw_dict", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def add_entry():\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n cur = db.execute('insert into entries (title, ingredients, steps, \\\n tags, url) values (?, ?, ?, ?, ?)',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url']])\n db.commit()\n flash('Recipe, ' + escape(request.form['title'])\n + ', was successfully added', 'success')\n return view_entry(str(cur.lastrowid))\n else:\n return render_template('add_entry.html')", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add_entry(self, entry): # Hashmap.add_entry\n\n if entry.hexdigest in self.contentHash:\n self.contentHash[entry.hexdigest].append(entry)\n else:\n self.contentHash[entry.hexdigest] = [ entry ]\n\n if entry.depth < self.minDepth:\n self.minDepth = entry.depth", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def add_entry(self, account):\n def txn():\n entry = self.entries.filter('account =', account).get()\n if not entry:\n entry = Entry(account=account, parent=self)\n entry.put()\n created = True\n else:\n created = False\n return entry, created\n return db.run_in_transaction(txn)", "def append(self, entry):\n if not isinstance(entry, Entry):\n raise ValueError('Not an tlv8.Entry: {e}'.format(e=entry))\n self.data.append(entry)", "def _add_entry(self, cat_entry):\n\n # run through category apps and add orphans to Desktop\n # database, add DM and categories to database\n models.cat_apps(cat_entry)\n\n # run through and categories to database\n models.cat_list(cat_entry.categories)\n\n # create new - 
models.py \n cat_record = models.Categories(category=cat_entry.category) \n\n # fill in values \n cat_record.fill_record(cat_entry) \n\n BaseInfo.session.add(cat_record)\n\n try:\n BaseInfo.session.commit( )\n except exc.SQLAlchemyError:\n logger.error(\"Commit error\")", "def add_entry(self, ent, can_replace=True):\n if self.has_entry(ent.ID):\n if not can_replace:\n self.visual.error(f\"Entry {ent.ID} already exists in the collection!\")\n return None\n # delete existing, to replace\n self.remove(ent)\n ent = self.add_entry_to_collection_containers(ent)\n if ent is None:\n return ent\n self.add_entry_to_bibtex_db(ent)\n self.visual.log(f\"Added ID: {ent.ID}\")\n return ent", "def feed(self, entry):\r\n if entry.name not in self.names:\r\n self.names[entry.name] = list()\r\n self.names[entry.name].append(entry)", "def add_a_new_entry(self):\n id = self.input_id()\n name = self.input_name()\n birthday = self.input_birthday()\n midterm = self.input_score(1, 'Input Midterm Score')\n finalterm = self.input_score(1, 'Input Finalterm Score')\n\n new_list = pd.DataFrame(\n [[id, name, pd.Timestamp(birthday), midterm, finalterm, np.nan, np.nan]],\n columns=self.columns)\n new_list.astype(self.dtype)\n\n self.merge_list(new_list)", "def db_add_entry(person):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if person.name in db:\n print(\"Updating existing entry ..... {name}\\n\".format(name=person.name))\n else:\n person.new = True\n print(\"Adding new entry ..... {name}\".format(name=person.name))\n db[person.name.capitalize()] = person.phone\n db.sync()\n db.close()\n db_show_all()", "def add_entry(self, new_entry):\n existing_entry = self._entries.get(new_entry.key)\n if existing_entry is not None:\n existing_entry.add_menge(new_entry.get_menge())\n for occ in new_entry.occurrences:\n existing_entry.add_occurrence(occ)\n return existing_entry\n else:\n self._entries[new_entry.key] = new_entry\n self._order.append(new_entry.key)\n return None", "def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response", "def add_entry(\n self,\n the_id: str,\n the_name: str,\n the_parent: str = '') -> None:\n\n # validate inputs\n the_id, the_name, the_parent = self._validate_entry(the_id, the_name, the_parent)\n\n # verify that the_id doesn't already exist\n if the_id in self.labels:\n raise KeyError('the_id = {} already exists'.format(the_id))\n\n # check if name is already being used, and warn if so\n for key, value in self.labels.items():\n if value == the_name:\n logger.warning(\n 'Note that id {} is already using name {}. 
Having repeated names is '\n 'permitted, but may lead to confusion.'.format(key, value))\n\n # add the entry into the labels and subtypes dicts and reset the values\n # perform copy in case of failure\n labels = self.labels.copy()\n subtypes = self.subtypes.copy()\n labels[the_id] = the_name\n if the_parent in subtypes:\n subtypes[the_parent].append(the_id)\n else:\n subtypes[the_parent] = [the_id, ]\n\n try:\n self.set_labels_and_subtypes(labels, subtypes)\n except (ValueError, KeyError) as e:\n logger.error(\n 'Setting new entry id {}, name {}, and parent {} failed with '\n 'exception {}'.format(the_id, the_name, the_parent, e))", "def add_entry(self, message_id):\n message_words = set(self.message_corpus[message_id].split())\n entry = Entry(priority=self.get_priority(message_words), wordset=message_words, id=message_id)\n self.pq.put( entry )", "def add_entry(self, entry: Entry) -> bool:\n for e in self.get_entries():\n if e.get_name() == entry.get_name():\n return False\n self.__entries.append(entry)\n self.__entries.sort()\n return True", "def add_item_entry(self, the_spec):\n debug(\"Adding entry {}\".format(the_spec))\n entry = tk.Entry(self.current_parent)\n self.entries[the_spec.value] = entry\n if not self.parent_is_grid:\n entry.pack()\n return entry", "def append_entry(host, email, password, mailbox):\n\n new_entry = {\n\n 'host': host,\n 'email': email,\n 'password': password,\n 'mailbox': mailbox\n }\n\n with open('data.json') as f:\n data = load(f)\n\n data[\"items\"].append(new_entry)\n\n with open('data.json', 'w') as outfile:\n dump(data, outfile, indent=4)\n\n print('\\nNew Entry Added Successfully!')", "def _add_entry(self, entry_id: int, text: str, category=None, new_field_dict=None):\n if category is None:\n category = self.active_category\n if category is None:\n raise ValueError(\"Cannot add entry without specifying category if 'active_category' is None.\")\n if entry_id < 0:\n self.CustomDialog(\"Entry ID Error\", message=f\"Entry ID cannot be negative.\")\n return False\n if entry_id in self.get_category_data():\n self.CustomDialog(\n title=\"Entry ID Error\",\n message=f\"Entry ID {entry_id} already exists in category {camel_case_to_spaces(self.active_category)}.\",\n )\n return False\n\n self._cancel_entry_id_edit()\n self._cancel_entry_text_edit()\n self.get_category_data()[entry_id] = new_field_dict # add entry to category dictionary\n self._set_entry_text(entry_id, text)\n self.select_entry_id(entry_id, set_focus_to_text=True, edit_if_already_selected=False)\n\n # TODO\n # if from_history:\n # self.jump_to_category_and_entry(category, text_id)\n # if not from_history:\n # self.action_history.record_action(\n # undo=partial(self._delete_entry, category, text_id),\n # redo=partial(self._add_entry, category, text_id, text),\n # )\n # self.unsaved_changes.add((self.active_category, text_id, 'add'))\n\n return True", "def add_entry():\n\n host = input('\\nEnter Mail Server Host: ')\n email = input('\\nEnter Email ID: ')\n password = getpass(prompt='\\nEnter Password: ')\n mailbox = input('\\nEnter MailBox: ')\n mobile = input('\\nEnter Mobile Number: ')\n\n if not isfile('data.json'):\n print('No input data.json found...')\n create_input_file()\n\n append_entry(host, email, password, mailbox)", "def append(self, entry):\n self.strings.append(entry)", "def add_entry(self, dn, attrs):\n # Convert our dict to nice syntax for the add-function using modlist-module\n if attrs and dn:\n ldif = modlist.addModlist(attrs)\n # Do the actual synchronous add-operation to the 
ldapserver\n logger.info(\"add entry %s.\" % ldif)\n self._conn.add_s(dn, ldif)", "def addEntry(self, symbol, address):\n self.table[symbol] = address", "def add_entry(self, entry_or_resource):\n def validate_resource_type(data):\n if 'resourceType' not in data:\n raise ValueError(f\"ill formed bundle entry: {data}\")\n\n if 'resource' not in entry_or_resource:\n # Bundles nest each entry under a 'resource'\n validate_resource_type(entry_or_resource)\n entry = {'resource': entry_or_resource}\n else:\n validate_resource_type(entry_or_resource['resource'])\n entry = entry_or_resource\n\n self.entries.append(entry)", "def add(self, entry):\n s = sppasUnicode(entry)\n entry = s.to_strip()\n if self.__case_sensitive is False:\n s = sppasUnicode(entry)\n entry = s.to_lower()\n\n if entry not in self.__entries:\n self.__entries[entry] = None\n return True\n\n return False", "def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n self.__log_entry_operation('created', entry=entry)\n return entry\n except (exceptions.FailedPrecondition,\n exceptions.PermissionDenied) as e:\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n self.__log_entry_operation('was not created',\n entry_name=entry_name)\n raise e", "def add_log_entry(self, log_entry):\n self.log_entries.append(log_entry)", "def add_entry(self, start_day, start_hour, stop_day, stop_hour, mode, radar=[\"ALL\"]):\n self.entries.append(Entry(self.year, self.month, start_day, start_hour, stop_day, stop_hour, mode, radar))", "def add(self, update, context):\n\n telegram_user = update.message.from_user\n\n if len(context.args) != 2:\n message = \"Sorry! I could not add the entry! Please use the the command passing the following arguments:\\n\\n /add <url> <entryname> \\n\\n Here is a short example: \\n\\n /add http://www.feedforall.com/sample.xml ExampleEntry\"\n update.message.reply_text(message)\n return\n print(f'context.args: {context.args}')\n # arg_url = FeedHandler.format_url_string(string=context.args[0])\n arg_url = context.args[0]\n arg_entry = context.args[1]\n print(f'arg_entry: {arg_entry}')\n print(f'arg_url: {arg_url}')\n\n # Check if argument matches url format\n # if not FeedHandler.is_parsable(url=arg_url):\n # message = (\n # \"Sorry! It seems like '\"\n # + str(arg_url)\n # + \"' doesn't provide an RSS news feed.. Have you tried another URL from that provider?\"\n # )\n # update.message.reply_text(message)\n # return\n\n # Check if entry does not exists\n entries = self.db.get_urls_for_user(telegram_id=telegram_user.id)\n print(entries)\n\n if any(arg_url in entry for entry in entries):\n message = (\n \"Sorry, \"\n + telegram_user.first_name\n + \"! I already have that url with stored in your subscriptions.\"\n )\n update.message.reply_text(message)\n return\n\n if any(arg_entry in entry for entry in entries):\n message = (\n \"Sorry! I already have an entry with name \"\n + arg_entry\n + \" stored in your subscriptions.. 
Please choose another entry name or delete the entry using '/remove \"\n + arg_entry\n + \"'\"\n )\n update.message.reply_text(message)\n return\n\n self.db.add_user_bookmark(\n telegram_id=telegram_user.id, url=arg_url, alias=arg_entry\n )\n message = \"I successfully added \" + arg_entry + \" to your subscriptions!\"\n update.message.reply_text(message)", "def add(self, key, value):", "def add_entry(self, scenario_info):\n print(\"--> Adding entry in execute table on server\")\n entry = \"%s,created\" % scenario_info[\"id\"]\n command = \"echo %s >> %s\" % (entry, self._server_path)\n err_message = \"Failed to update %s on server\" % self._EXECUTE_LIST\n _ = self._execute_and_check_err(command, err_message)", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def add_entry(results):\n db = get_db()\n data_to_insert = format_json_to_dicts(results)\n\n db.executemany(\"insert into python_repos ( repo_id, name, url, created_date, last_push_date, description, stars, avatar) \\\n values (:repo_id, :name, :url, :created_date, :last_push_date, :description, :stars, :avatar)\", data_to_insert)\n\n db.commit()\n flash('Updated ' + str(time.strftime(\"%Y-%m-%d %H:%M\")))\n return redirect(url_for('show_entries'))", "def add_entry_to_collection_containers(self, ent):\n\n ID = ent.ID.lower()\n title = ent.title.lower()\n # update object lookup dict\n if ID in self.entries:\n self.visual.error(\"Entry with id {} already in entries dict!\".format(ID))\n return None\n self.entries[ID] = ent\n # update title-id mapping\n self.title2id[title] = ID\n for auth in ent.author:\n if auth not in self.author2id:\n self.author2id[auth] = []\n self.author2id[auth].append(ID)\n\n # update ids and titles lists\n self.id_list.append(ID)\n self.title_list.append(title)\n # update maximum ID / title lengths\n if len(ent.ID) > self.maxlen_id:\n self.maxlen_id = len(ent.ID)\n if len(ent.title) > self.maxlen_title:\n self.maxlen_title = len(ent.title)\n if ent.file:\n self.all_pdf_paths.append(ent.file)\n return ent", "def addEntry(self, entry):\n \n with open(self.current_log, 'ab') as a:\n logAppender = csv.writer(a, delimiter=\"|\")\n logAppender.writerow(entry)", "def add(self, key, value):\n self.data.append((key, value))", "def add(\n description: str = typer.Argument(\n ...,\n help=\"Description of the log entry\"\n ),\n date: datetime = typer.Option(\n datetime.now().strftime(\"%Y-%m-%d\"), '--date', '-d',\n help=\"Date of the log entry\"\n ),\n time: datetime = typer.Option(\n datetime.now().strftime(\"%I:%M %p\"), '--time', '-t',\n formats=[\"%H:%M:%S\", \"%I:%M %p\"],\n help=\"Time of the log entry\"\n )\n):\n log_entry_time = time.time()\n log_datetime = datetime.combine(date, log_entry_time)\n\n manager = LogBookManager()\n created, message = manager.create(description, log_datetime)\n\n if created:\n typer.echo(\n typer.style(message, fg=typer.colors.GREEN, bold=True)\n )\n else:\n typer.echo(\n typer.style(message, fg=typer.colors.RED, bold=True)\n )", "def add_entry(title: str, datetime: pendulum.datetime) -> None:\n datetime = datetime.in_tz('UTC')\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n c.execute(\n \"\"\"insert into entries values\n (?, ?, ?, ?)\"\"\",\n (title, datetime.year, datetime.month, datetime.day)\n )\n conn.commit()\n conn.close()", "def test_addEntryByString(self):\n b = self.g.add_entry('foo')\n self.assertTrue(b)", "def test_addEntryByDict(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry({'term': 'foo', 'tags': 
'a', 'value': '1'})\n self.assertTrue(b)", "def add(self, name, value) -> None:\n ...", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def testAddEntry(self):\n db = beerlogdb.BeerLogDB(self.DB_PATH)\n db.known_tags_list = {\n 'char1': {'name': 'toto', 'glass': 33},\n }\n db.AddEntry('char1', 'pic1')\n db.AddEntry('char1', 'pic1')\n self.assertEqual(db.CountAll(), 2)", "def add(self, record):\n self._hist_records[record.uid] = record", "def new_entry(path, name):\n\n default_config = {'prompt': \"Select command to run:\", 'choices': {}}\n with open(path, 'w') as f:\n json.dump(default_config, f)\n\n add_entry_to_database(path, name)", "def add(self, item):", "def test_addEntryByList(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry(['foo', 'a', '1'])\n self.assertTrue(b)", "def add(self, data):\n if self._filter(data):\n id = self.db._generate_id(data)\n \n if not id == None:\n if self.db._store:\n self.db.append(id, str(data))\n print id, \"stored to\", self.db._generate_path(id)\n else:\n print id\n print data.show2()", "def add_entry():\n username = util.remove_commas_from_string(request.form[\"name\"])\n link = util.remove_commas_from_string(request.form[\"ytLink\"])\n song = util.remove_commas_from_string(request.form[\"songName\"])\n\n festive = CHRISTMAS_MODE and \"christmasSong\" in request.form\n\n with database.connect_to_database() as db:\n user_id = database.get_userid(db, username)\n database.add_song(db, link, song, user_id, month=12 if festive else None)\n\n return redirect(url_for('main'))", "def add(self, entry):\n score = entry.get_score()\n\n # Does the new entry qualify as high score.\n # The high score is True if board not full or score is higher than last entry\n good = self._n < len(self._board) or score > self._board[-1].get_score()\n\n if good:\n if self._n < len(self._board): # no score drops from list\n self._n += 1 # overall number increases\n\n\n # shift lower scores rightwards to make entry\n 
j = self._n - 1\n while j > 0 and score > self._board[j-1].get_score():\n self._board[j] = self._board[j-1] # shift entry from j-1 to j\n j -= 1 # decrement j\n self._board[j] = entry # well done, add new entry", "def _store_entry_in_table(conn, table_name, entry):\n # Create entry insertion template.\n template = ('?, ' * len(entry)).rstrip(', ') # \"?\" for each value\n template = '(%s)' % template # enclose in parentheses\n # Try to insert a new row into the table.\n with conn:\n cur = conn.cursor()\n cur.execute('INSERT INTO %s VALUES%s' % (table_name, template), entry)", "def addQueueEntry(*args):\n try:\n #A unique id for each command.\n self.cmd_seq = self.cmd_seq + 1\n #Create a new queu entry\n self.entries[self.cmd_seq] = _QueueEntry(self, name, args, self.cmd_seq, self.log)\n #append it to the command queue\n self.queue.append(self.cmd_seq)\n #Return handle to the new entry for setting callbacks on.\n return self.entries[self.cmd_seq]\n except Exception as ex:\n self.log.failure(\"Error in addQueueEntry {err!r}\",err=str(ex))", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def add_entries(self, *entries: Entry):\n for entry in entries:\n self.add_entry(entry)", "def add_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n je.Editor(self.session, self.source.tbl, self.source)", "def add(self):\n pass", "def add(self, item):\n self._dict[item] = item", "def add_record(self, record):\n logging.debug('Adding new entry to table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n date = record.get('date', '')\n time = record.get('time', '')\n location = record.get('location', '')\n node_id = record.get('nodeID', '')\n\n if '' in (date, time, node_id, location):\n raise Exception('Invalid SecuritySystemDB record!')\n\n self._cursor.execute(\"insert into {} values(?, ?, ?, ?)\".format(self._name),\n (date, time, location, node_id))", "def add(name, number, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n if phonebook_data.get(name):\n raise DuplicateError(\"This entry already exists. 
To make changes, \"\n \"use update_number or update_name.\")\n\n else:\n phonebook_data[name] = number\n print \"Entry added:\", name, number\n save(phonebook_data, phonebook)", "def add_row(self, row_id):", "async def monsave(self, ctx, *, entry):\r\n\r\n self.connect()\r\n discord_id = str(ctx.message.author.id)\r\n\r\n self.database.entries.insert_one({\r\n \"discord_id\": discord_id,\r\n \"entry\": entry\r\n })\r\n\r\n await ctx.send('You have successfully saved this entry in the Viking database.')", "def add(self, *args):\n pass", "def add(self, *args):\n pass", "def create_tag_with_entry(title):\n tag = Tag.objects.create(title=title)\n tag.save()\n tag.entry.add(1)\n return tag", "def _add_one(self, path):\n\n if not type(path).__name__ == \"Path\":\n path = Path(path)\n self._entries.append(path)\n self._clean = False\n self._current = 0", "def add_entries(self, entries):\n\n # If entries is a single entry, put in list for processing below\n if isinstance(entries, str):\n entries = [entries]\n\n for entry in entries:\n #Check if entries already exist\n try:\n self.entries[entry]\n # Entry doesn't already exist\n except KeyError:\n # Validate that entry is either an attribute of owner or in SystemLogEntries\n if not entry in SystemLogEntries and not entry in self.owner.__dict__:\n raise LogError(\"{0} is not an attribute of {1} or in SystemLogEntries\".\n format(entry, self.owner.name))\n # Add entry to self.entries dict\n self.entries[entry] = []\n\n # Entry exists\n else:\n # Issue warning and ignore\n warnings.warn(\"{0} is already an entry in log for {1}; use \\\"log_entry\\\" to add a value\".\n format(entry,self.owner.name))", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def add_new_object(cls, v, ins):\n cu = config.auth_obj().get_user(cls)\n v['creator'] = cu.get('nickname', '') or cu.get('email', '')\n d = {}\n [d.update({str(k): v[k]}) for k in v]\n o = cls.MODEL(**d)\n o.category_id = str(ins.content.get_unique_category_id())\n o.put()\n return o", "def feed(self, entry):\r\n pass", "def add_entry(self, key, value, depth):\n current = self.entries.get(key, None)\n if current is None or current.depth > depth:\n self.entries[key] = NodeEntry(key, value, depth)\n elif current.depth == depth:\n raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))", "def new_entry(self, entry=\"entry\", program_name=\"pyFAI\",\n title=\"description of experiment\",\n force_time=None, force_name=False):\n\n if not force_name:\n nb_entries = len(self.get_entries())\n entry = \"%s_%04i\" % (entry, nb_entries)\n entry_grp = self.h5.require_group(entry)\n entry_grp.attrs[\"NX_class\"] = numpy.string_(\"NXentry\")\n entry_grp[\"title\"] = numpy.string_(title)\n entry_grp[\"program_name\"] = numpy.string_(program_name)\n if force_time:\n entry_grp[\"start_time\"] = numpy.string_(force_time)\n else:\n entry_grp[\"start_time\"] = numpy.string_(get_isotime())\n self.to_close.append(entry_grp)\n return entry_grp", "def createAtomEntry(self, postLink, atomNewEntry): #$NON-NLS-1$\r\n atomRequest = self._createNewEntryRequest(postLink, atomNewEntry)\r\n self._sendAtomEntry(atomRequest, atomNewEntry)\r\n atomEntry = atomRequest.getEntry()\r\n del atomRequest\r\n return atomEntry", "def add(self, filename, *args):\n return self.cmd('add', filename, *args)", "def create_entry(cls, title, date, timeSpent, learned, resources):\n try:\n with DATABASE.transaction():\n cls.create(\n title=title,\n date=date,\n timeSpent=timeSpent,\n 
learned=learned,\n resources=resources\n )\n except IntegrityError:\n raise ValueError(\"Entry already exists\")", "def add(self, node):\n if str(node.getPosition()) in self._history:\n # duplicate entry\n return\n self._history[str(node.getPosition())] = True\n self._insort(node)", "def new_entry(request, stock_id):\n stock= Stock.objects.get(id= stock_id)\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form= EntryForm()\n else:\n # POST data submitted; process data.\n form= EntryForm(data= request.POST)\n if form.is_valid():\n new_entry= form.save(commit= False)\n new_entry.stock= stock\n new_entry.save()\n return redirect('stock_trackers:stock', stock_id= stock_id)\n\n # display a blank or invalid form\n context= {'stock':stock, 'form': form}\n return render(request, 'stock_trackers/new_entry.html', context)", "def add_entry():\n clear()\n name = input(\"Enter Full Name: \")\n task = input(\"Enter a task name: \")\n while True:\n try:\n time = input(\"Enter the minutes (ints only) to complete task: \")\n int(time)\n except ValueError:\n input(\"Be sure you are entering an integer. \")\n else:\n break\n quest = input(\"Would you like to add a note [N/y]: \").upper()\n note = \"\"\n if quest == \"Y\":\n print(\"Enter your note below.\")\n note = input(\":\")\n return db_insert(name, task, time, note)", "def db_insert(name, task, time, note):\n Entry.create(name=name,\n task=task,\n time=time,\n note=note)\n return main()", "def add(self):\n try:\n self.active_table.add_row(Row.Row([obj.get() for obj in self.enter_values]))\n self.parent.display_content()\n self.master.withdraw()\n except UnableToCastException as err:\n messagebox.showerror(\"Error\", err)", "def add_entries(\n self,\n entry,\n source,\n function,\n **args\n ):\n\n # check whether the stuff is already there\n if entry in self._header:\n answer = input(\"[?] Datatype has already been produced, do you want to override?\")\n if answer.lower() in ['y','yes']:\n pass\n else:\n pass\n else:\n # get the index of the source in self\n idx = self._header[source] \n\n # get the new index into the header\n self._header[entry.lower()] = max(self._header.values())+1\n self._alias[entry.lower()] = entry.lower()\n self.header[entry.lower()] = self._header[entry.lower()]\n\n # iterate over the data and create the new entry\n for key in self:\n \n # get the source\n s = self[key][idx]\n\n # transform s\n t = function(s,**args)\n\n # add\n self[key].append(t)", "def add(self, name, content):\n raise NotImplementedError", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True" ]
[ "0.8038067", "0.78797853", "0.7829428", "0.762765", "0.761063", "0.75863564", "0.75329536", "0.7498819", "0.7392129", "0.7390942", "0.72157484", "0.72009754", "0.7183276", "0.71024626", "0.7099431", "0.7096168", "0.70957315", "0.70942044", "0.70942044", "0.70640934", "0.7045255", "0.69635797", "0.6929005", "0.69059294", "0.6883792", "0.686071", "0.6860232", "0.6852258", "0.68158746", "0.67963004", "0.6786736", "0.6758", "0.67552406", "0.6672242", "0.66686577", "0.66620404", "0.6657316", "0.6635667", "0.66247696", "0.66173816", "0.6601825", "0.6578675", "0.65461856", "0.65449715", "0.6521804", "0.65187186", "0.6493805", "0.6477094", "0.6437098", "0.643068", "0.64281446", "0.6426891", "0.64151466", "0.6362234", "0.63614196", "0.63596684", "0.6349026", "0.6347964", "0.6324286", "0.63103336", "0.63036317", "0.6293223", "0.6291091", "0.62851137", "0.62835693", "0.62826467", "0.627424", "0.62543607", "0.62487787", "0.62331516", "0.62331516", "0.62331516", "0.61953986", "0.6189177", "0.6176826", "0.6165911", "0.61604744", "0.61554915", "0.61302555", "0.6112912", "0.61089", "0.61089", "0.61084175", "0.6102505", "0.6077631", "0.6076738", "0.6066785", "0.6066672", "0.606087", "0.60492164", "0.6048197", "0.6035193", "0.6028984", "0.6028718", "0.60057276", "0.5989202", "0.5988021", "0.5985861", "0.59849894", "0.59771645", "0.597175" ]
0.0
-1
Add a single value or vector
def add_value(self, indep, key, value): self.add_dict(indep, {key: value})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, value):", "def add(self, value):\n pass", "def __add__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i + other for i in self.data], self.column)\n # other is a Vector\n elif isinstance(other, Vector):\n if len(self.data) != len(other):\n raise Exception('Vectors are not of equal length')\n elif self.column != other.column:\n raise Exception('Vectors are not of equal orientation')\n else:\n return Vector([self.data[i] + other.data[i] for i in range(len(self.data))], self.column)\n # other is not a scalar or a Vector\n else:\n raise Exception('Argument is not a number or a Vector') from TypeError", "def vector_add(v1, v2):\n return v1[0] + v2[0], v1[1] + v2[1]", "def __add__(self, other):\n if isinstance(other, (int, type(Zero()))):\n if (other == 0):\n return self\n self._check_vector(other)\n return Vector(self.args + other.args)", "def addInPlace(self, value1, value2):\n raise NotImplementedError", "def add(self, value):\n self.arr.append(value)", "def convert_add_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Add', **kwargs)", "def add(self, value):\n ind = self._ind % self.shape[0]\n self._values[ind] = value\n self._ind += 1\n self._cached = False", "def __add__(self, other: Any) -> TypeValue:\n if isinstance(other, np.ndarray):\n return other + float(self)\n\n return self._like_self_from_float(\n float(self) + self._other_same_units(other)\n )", "def __add__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj + other\n newValue = self.value + other.value\n\n return self._newMV(newValue)", "def add(self, value):\n if self._element_type is None:\n self._element_type = type(value)\n # check validation\n if isinstance(value, np.generic):\n # value = np.asscalar(value) # deprecated in numpy v1.16\n value = value.item()\n if not isinstance(value, self._element_type):\n raise TypeError(\n \"A %s parameter is expected, but received: %s\" % (str(self._element_type), str(type(value))))\n if value in self._innercontainer:\n warnings.warn(\"Adding element %s has already in the collection, skip.\" % (value.__str__()),\n category=RepeatElementWarning,\n stacklevel=3)\n else:\n self._innercontainer.append(value)\n return self", "def __iadd__(self, value):\n self.store.append(value)\n return self", "def add(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.x+other.x, first.y+other.y, first.z+other.z)", "def __add__(self, other):\n if isinstance(other, Vector):\n a = self._ar + other._ar\n else:\n a = self._ar + numpy.array(other)\n return Vector(a)", "def __add__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.add)", "def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out", "def add(self, value):\n return self.number + value", "def __iadd__(self, other: Any) -> None:\n self.add(item = other)\n return", "def __add__(self, other: Any) -> None:\n self.add(item = other)\n return", "def _add(self, type_or_name: str | type, value: Callable | type | tuple[bool, Callable]):\n key = type_or_name if isinstance(type_or_name, str) else type_or_name.__name__\n self._values[key] = value", "def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif 
isinstance(value, self.__class__):\r\n return self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )", "def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)", "def add(self, value):\n\n # if the value implements the RivineBinaryObjectEncoderBase class,\n # we ignore the underlying type and use the custom-defined logic\n # as provided by the RivineBinaryObjectEncoder.\n if isinstance(value, RivineBinaryObjectEncoderBase):\n value.rivine_binary_encode(encoder=self)\n return\n\n # try to rivbin-encode the value based on its python type\n if isinstance(value, bool):\n if value:\n self._data += bytearray([1])\n else:\n self._data += bytearray([0])\n elif isinstance(value, int):\n self.add_int64(value)\n else:\n # try to rivbin-encode the value as a slice\n try:\n self.add_slice(value)\n return\n except TypeError:\n pass\n raise Exception(\"cannot rivbin-encode value with unsupported type {}\".format(type(value)))", "def add(self, a, b):\n return a + b", "def __add__(self, vector):\n return self.translated(vector)", "def add(first, second):\n return first + second", "def __iadd__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.add)", "def add(self, vector):\n self.x += vector.x\n self.y += vector.y", "def add(self, number):\n\n return self.from_list([x+number for x in self.vector])", "def __add__(self, _v):\n\t\tif len(self) == len(_v):\n\t\t\tans = copy.deepcopy(self)\n\t\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] += _v[i]\n\t\t\treturn ans", "def vec_add_scalar (x, c):\n return [x_i+c for x_i in x]", "def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def add(self, value: object) -> None:\n self.da.append(value)", "def add(self, value: object) -> None:\n self.da.append(value)", "def add(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n if len(vec2) != len(self):\n raise DifferentLengthVectors(self, vec2)\n\n return Vector(*[self[i]+vec2[i] for i in range(len(self))])", "def __add__(self, other):\n\t\tif len(self) != len(other):\n\t\t\traise ValueError('dimensions must agree')\n\t\tresult = Vector(len(self))\n\t\tfor j in range(len(self)):\n\t\t\tresult[j] = self[j] + other[j]\n\t\treturn result", "def __add__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Add.apply(self, other)", "def __add__(self, other):\n return Vec2d(self.v[0] + other[0], self.v[1] + other[1])", "def vector_add(a, b):\n assert(len(a) == len(b))\n\n from operator import add\n return tuple(map(add, a, b))", "def __add__(self, other):\n\t\ttry:\n\t\t\tval = self.val + other.val\n\n\t\t\t# Handle case when self.der or other.der contains None \n\t\t\t# i.e. 
self or other is a vector of scalars, not of Vars\n\t\t\tlen_self_der_shape = len(self.der.shape)\n\t\t\tlen_other_der_shape = len(other.der.shape)\n\n\t\t\tif not len_self_der_shape and len_other_der_shape:\n\t\t\t\tder = other.der\n\t\t\telif len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = self.der\n\t\t\telif not len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = None\n\t\t\telse:\n\t\t\t\tder = self.der + other.der\n\t\texcept AttributeError:\n\t\t\tval = self.val + other\n\t\t\tder = self.der\n\t\treturn Var(val, der)", "def __add__(self,l):\r\n\t\t\r\n\t\t# add\r\n\t\ta = self.add(l)\r\n\t\t\r\n\t\treturn a", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj", "def jsonrpc_add(self, a, b):\n return a + b", "def add(value1, value2):\n return 1 / (1.0 / value1 + 1.0 / value2)", "def add(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n sum = str(args[0] + args[1])\n return sum", "def add(self, elem: 'NestedInteger'):\n if self.value is None:\n self.value = [elem]\n elif self.isInteger():\n self.value = [NestedInteger(self.value), elem]\n else:\n self.value = [*self.value, elem]", "def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)", "def test_add_different_sizes():\n Vector(1.0) + Vector(2.0, 3.0)", "def __add__(self, element):\r\n self.elements += element", "def add(element):", "def add(self,*datas):\n\t\tresult = sum(datas)\n\t\treturn result", "def add(lhs, rhs):\n return _make.add(lhs, rhs)", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def __add__(self, other):\n if len(self) != len(other):\n raise ValueError('As dimensões devem ser iguais')\n\n result = Vector(len(self)) # inicia um novo array do tamanho do próprio\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def test_add_with_vec_argument(self):\n\n a = Vec3(2, 3, 4)\n b = Vec3(1, 2, 3)\n\n result = a + b\n\n expected_result = Vec3(3, 5, 7)\n\n self.assertEqual(result, expected_result)", "def add(self, x):\n if type(x) is int:\n self.real += x\n else:\n self.real = self.real + x.real\n self.imag = self.imag + x.imag", "def test_iadd_with_vec_argument(self):\n\n a = Vec3(2, 3, 4)\n b = Vec3(1, 2, 3)\n\n a += b\n\n expected_result = Vec3(3, 5, 7)\n\n self.assertEqual(a, expected_result)", "def smart_add(*args):\n result = 0\n for item in args:\n result += item\n\n return result", "def add( a, b ):\n return a + b", "def append_value(self, value):\n self.value += value", "def __add__(self, v2):\n\t\treturn Vect2D(self._vec+v2._vec)", "def add(x, y):\n\n return x + y", "def __add__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) + float(argument))", "def addNumber(self,num):\n self.vec.append(num)", "def add(self, num) -> [int, float]:\n\n if Calculater.check_int_or_float(num):\n self.total += num\n\n else:\n self.check_type(input_type=type(num))", "def add(self, elem):", "def add(self, elem):", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n 
return x + y", "def add(self, x, y):\n pass", "def sum(a,v):\n return a+v", "def add(self, value):\n self._resolve_copies()\n self.data.append(value)", "def add_one(x):\n return x + 1", "def __add__(self,other):\n return Vector(self.x + other.x, self.y+other.y)\n pass", "def add(self, value: float) -> None:\n self.rawValue = self.momentum * self.rawValue + (1 - self.momentum) * value\n self.i += 1", "def add(self, name, value) -> None:\n ...", "def add(self, val):\n self[val] += 1", "def union_add(this, that):\n return this.add(that, fill_value=0)", "def add_data_single(self, pt, val):\n raise NotImplementedError('Abstract Method')", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def addition(self, first_value, second_value):\n return first_value + second_value", "def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)" ]
[ "0.77882046", "0.7528879", "0.71992666", "0.7022837", "0.68144745", "0.68144", "0.6740295", "0.66997826", "0.669777", "0.6694546", "0.6663471", "0.6647989", "0.66343087", "0.6605468", "0.6603293", "0.6592215", "0.65901595", "0.6578818", "0.65574723", "0.6531693", "0.65271676", "0.6526166", "0.64987725", "0.6476825", "0.6474855", "0.6448957", "0.6442347", "0.64358956", "0.6435697", "0.6426264", "0.642027", "0.641292", "0.64006793", "0.63766974", "0.63766974", "0.63736355", "0.63688445", "0.6365104", "0.63648844", "0.6359709", "0.6350387", "0.63423795", "0.6337388", "0.63286555", "0.63217574", "0.6315672", "0.63139", "0.6301437", "0.6300503", "0.6298798", "0.6296918", "0.6281857", "0.628078", "0.6277525", "0.6273765", "0.6266583", "0.62652236", "0.6261472", "0.62603563", "0.6251691", "0.6248576", "0.62443006", "0.6235776", "0.623155", "0.62312", "0.6222756", "0.6215995", "0.6215995", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.6209132", "0.62029886", "0.61969703", "0.6183757", "0.6180554", "0.6170552", "0.61695635", "0.6165787", "0.61607426", "0.6157042", "0.61485803", "0.61484826", "0.61484826", "0.61484826", "0.61484826", "0.61484826", "0.61484826", "0.6135343", "0.6135343", "0.6130145", "0.6129043" ]
0.0
-1
Add a dictionary of dependent data
def add_dict(self, indep, dep): dfull = {IND: len(self), INDEP: indep.copy(), DEP: dep} validate_row(dfull) check_objects(dfull) if settings.CONVERT_SCALAR_ARRAYS: scalarise(dfull) if settings.PRINT_UPDATES: print(self.show([dfull])) self.append(dfull) self._combine(dfull)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(\n self, indep, key=None, value=None, dep=None, keys=None, values=None, **kwargs\n ):\n if key is not None and value is not None:\n if isinstance(key, str):\n self.add_value(indep, key, value)\n elif isinstance(key, list):\n self.add_array(indep, key, value)\n elif keys is not None and values is not None:\n self.add_array(indep, keys, values)\n elif dep is not None:\n self.add_dict(indep, dep)\n elif isinstance(key, dict):\n self.add_dict(indep, key)\n elif len(kwargs) > 0:\n self.add_dict(indep, kwargs)", "def add_value(self, indep, key, value):\n self.add_dict(indep, {key: value})", "def add_data_paths(self, path_dict: dict):\n self.data_dict.update(path_dict)", "def add_array(self, indep, keys, values):\n if np.ndim(values) > 1:\n values = orient(values, keys)\n dep = {k: v for k, v in zip(keys, values)}\n self.add_dict(indep, dep)", "def add_data(self, v, m, x, pos=1):\n if x is not None:\n if v in self.variables:\n if m in self.models:\n self.data.update({self.__gen_key(m, v, pos): x})\n self.pos.update({self.__gen_key(m, v, pos): pos})\n else:\n pass\n else:\n pass\n else:\n pass", "def addData(self, d):\n self.__populateDict(self._data, d)", "def add_dependency(self, dep):\n \n if dep == OrderedDict(): return False\n dep_key, dep_dict = dep.popitem()\n graph_list = self.get_dependencies(dep_key, self.graph, list())\n if graph_list != None:\n if graph_list != list():\n for graph in graph_list:\n graph[dep_key] = dep_dict\n else:\n self.graph[dep_key] = dep_dict\n return True\n return False", "def test_addDict(self):\n lidi = []\n lidi.append({'term': 'foo', 'tags': 'a', 'value': '1'})\n lidi.append({'term': 'bar', 'tags': 'a, b', 'value': '2'})\n lidi.append({'term': 'gnark', 'tags': 'a, c', 'value': '3'})\n self.g.add_dict(lidi)", "def add_data(self,**kwargs):\n self.given_data.update([x for x in kwargs.keys() if kwargs[x]!=None ])\n for i in range(len(self.attr)):\n param=self.attr[i]\n if param in kwargs and kwargs[param]!=None:\n if i==0 and not (0 <= kwargs['angle'] <= 90) :# atribute is angle\n raise ValueError('Angle should be between 0 an 90 degrees')\n elif i==7 and not (0 <= kwargs[param] <= 1):\n raise ValueError('Coefficient (kf) should be between 0 and 1')\n else:\n self.data[param]=kwargs[param]\n print('Added data to object. 
See current data by using print(object_name) or using check_data method')", "def require_data(self, typename):\n self.required_data_products.add(typename)", "def require_data(self, typename):\r\n self.required_data_products.add(typename)", "def _add_var_dict(self, var_dict=None):\n # add variable dictionaries, supplementing anything missing\n # with the standards defined in the json\n standards = self._load_json()\n if var_dict is None:\n var_dict = standards\n else:\n for level in standards.keys():\n if level not in var_dict.keys():\n var_dict[level] = standards[level].copy()\n else:\n for key in standards[level].keys():\n if key not in var_dict[level].keys():\n var_dict[level][key] = standards[level][key]\n self._map_cols = var_dict['columns']\n self._map_codes = var_dict['key_codes']\n self._variable_dict = var_dict\n self._standards = standards", "def add(self,x,y):\n # assert that independent variable is as long as each of the\n # dependent variables\n for ch in self.chs:\n assert len(x) == len(y[ch-1])\n apply(Storage.add, (self,[x,y]))", "def add_data(self, data: List[dict]):\n raise NotImplementedError()", "def add_records(self, data: dict, execution_context: dict):", "def _add_dependency(self, dep):\n self.dependency.append(dep)", "def add_dependency(session, data, username='system_user'):\n session = validate_session(session)\n failed_count = 0\n for deps in data['data']:\n pkg_id = deps['toppatch_id']\n for dep in deps['dependencies']:\n dep_exists = session.query(LinuxPackageDependency).\\\n filter(LinuxPackageDependency.toppatch_id == pkg_id).\\\n filter(LinuxPackageDependency.dependency == dep).first()\n if not dep_exists:\n try:\n dep_add = LinuxPackageDependency(pkg_id, dep)\n session.add(dep_add)\n session.commit()\n except Exception as e:\n session.rollback()\n failed_count += 1", "def addDic(dic, elt):\n pass", "def add_terms_data(self, terms: Dict[datetime, List[dict]]):\n raise NotImplementedError()", "def addData(self,data):\n\t\tif isinstance(data,list):\n\t\t\tif isinstance(data[0],dict):\n\t\t\t\tself.data.extend(data)\n\t\t\telif isinstance(data[0],list):\t\n\t\t\t\tfor r in data:\n\t\t\t\t\tacc= dict()\n\t\t\t\t\tfor h in self.header:\n\t\t\t\t\t\tacc[h]=r[self.header.index(h)]\t\n\t\t\t\t\tself.data.append(acc) \n\t\t\telse:\n\t\t\t\tself.data.append(dict(zip(self.header,data)))\n\t\telif isinstance(data,dict):\n\t\t\tself.data.append(data)\n\t\telse:\n\t\t\traise datatools.WrongTypeError(data)", "def add(self, other):\n\n def merge_dicts(d1, d2):\n \"\"\"\n Merge two dictionaries\n\n param d1: dictionary changed in place to have combined values\n type d1: dictionary(key -> set)\n param d2: dictioanry to be merged\n type d2: dictionary(key -> set)\n \"\"\"\n for key,value in d2.items():\n if key not in d1:\n d1[key] = value\n else:\n d1[key] |= value\n \n self.num_documents += other.num_documents\n self.num_expressions += other.num_expressions\n self.global_expressions += other.global_expressions\n self.expressions_with_e += other.expressions_with_e\n self.num_keywords += other.num_keywords\n merge_dicts(self.missing_tags, other.missing_tags)\n merge_dicts(self.problem_files, other.problem_files)", "def _add_dictionary(self, current, added):\n for key in added:\n if key in current and isinstance(current[key], collections.Mapping):\n self._add_dictionary(current[key], added[key])\n else:\n current[key] = added[key]", "def add_depend(self, data):\n try:\n self._session.add(StepDependencyEntity(\n child_id=data['child_id'],\n parent_id=data['parent_id']\n ))\n 
except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True", "def learn(primer, dependencies):\n knowledge_map = defaultdict(dict)\n for row in primer:\n for dvcol, ivcol in dependencies.items():\n # knowledge of the dependent value is mapped to the value\n # of the independent value col\n #\n # notice:\n # - if the knowledge_map has no entry for the dv col,\n # a dict is constructed automatically\n # - the value of the iv col is used\n # - overwrites the previous known relationship\n knowledge_map[dvcol][row[ivcol]] = row[dvcol]\n return knowledge_map", "def add(self, data_def_request):\n logger.debug(\"Adding a data_def request {}\"\n .format(data_def_request.to_dict()))\n self._data_defs.append(data_def_request)", "def data_dict_add_total(data_dict, sum_args, feat_name):\n for key in data_dict:\n data_dict[key][feat_name] = 0\n for feat in sum_args:\n if data_dict[key][feat] != 'NaN':\n data_dict[key][feat_name] += data_dict[key][feat]", "def set_dependencies(self,dependency_list):\n\t\tdeps = {}\n\t\tfor relation in dependency_list:\n\t\t\tself.nr_of_deps += 1\n\t\t\t# Find the type of relation\n\t\t\trel = re.match('[a-z\\_]*(?=\\()',relation).group(0)\n\t\t\t# Find head and dependent\n\t\t\thead = int(re.search('(?<=-)[0-9]*(?=, )',relation).group(0))\n\t\t\tdep = int(re.search('(?<=-)[0-9]*(?=\\)$)', relation).group(0))\n\t\t\t# Set head position and create\n\t\t\t#dictinary entries\n\t\t\tif head == 0:\n\t\t\t\tself.head_pos = dep\n\t\t\telse:\n\t\t\t\tdeps[head] = deps.get(head,[])\n\t\t\t\tdeps[head].append([dep,rel])\n\t\t#set headpos to first head in dependency list if sentence has no head\n\t\tif dependency_list and not self.head_pos:\n\t\t\tfirst_head = int(re.search('(?<=-)[0-9]*(?=, )',dependency_list[0]).group(0))\n\t\t\tself.head_pos = first_head\n\t\treturn deps", "def __init__(self,**kwargs):\n self.attr = ['angle','width','height','m','Fg','Fs','Fd','kf','Ff']\n # attributes of the incline in order: angle,width,height, mass,Fg(gravity force),Fs(statical force), Fd (dynamical force),kf(friction coefficient), Ff(friction force)\n self.data = {param: None for param in self.attr}#initialazing data\n self.given_data = set() #set of data given by user\n self.add_data(**kwargs)", "def add_info():\r\n car_order_list = []\r\n order_info = {}\r\n\r\n order_type = get_order_type()\r\n customer_name = get_name()\r\n phone_number = get_phone_number()\r\n address = get_address()\r\n cars_on_order = get_order(car_order_list)\r\n subsidy_of_cars = total_car_subsidy(cars_on_order)\r\n order_subsidy = event_charge(order_type)\r\n total_subsidy = get_total_subsidy(subsidy_of_cars, order_subsidy)\r\n\r\n order_info[\"Name\"] = customer_name\r\n order_info[\"Phone number\"] = phone_number\r\n order_info[\"Address\"] = address\r\n order_info[\"Order type\"] = order_type\r\n order_info[\"Cars on order\"] = cars_on_order\r\n order_info[\"Subsidy of cars\"] = subsidy_of_cars\r\n order_info[\"Order subsidy\"] = order_subsidy\r\n order_info[\"Total Subsidy\"] = total_subsidy\r\n\r\n # order_dict[(len(order_dict)+1)] = order_info\r\n\r\n return order_info", "def add_features(data_dict, features_list):\n\n for name in data_dict:\n # add features for the log values of the financial data\n for feat in features_financial:\n try:\n data_dict[name][feat + '_log'] = math.log(data_dict[name][feat] + 1)\n except:\n data_dict[name][feat + '_log'] = 'NaN'\n\n # Add ratio of POI messages to total.\n try:\n total_messages = data_dict[name]['from_messages'] + 
data_dict[name]['to_messages']\n poi_related_messages = data_dict[name][\"from_poi_to_this_person\"] +\\\n data_dict[name][\"from_this_person_to_poi\"] +\\\n data_dict[name][\"shared_receipt_with_poi\"]\n poi_ratio = 1.* poi_related_messages / total_messages\n data_dict[name]['poi_ratio_messages'] = poi_ratio\n except:\n data_dict[name]['poi_ratio_messages'] = 'NaN'\n\n return data_dict", "def add_nested_dependencies(dependency_map: Dict[str, Set[str]]) -> Dict[str, Set[str]]:\n # path can be at most as long as the total number of items\n for _ in range(len(dependency_map)):\n for dependencies in dependency_map.values():\n for dependent_key in dependencies.copy():\n dependencies.update(dependency_map[dependent_key])", "def add_modifications(self, dset):\n dset_keys = [\"entity_ids\", \"comp_ids\"]\n for key in dset_keys:\n self.model_dict[key] = dset[key]\n self.model_dict[\"code_version\"] = np.array(mbgdml_version)\n self.model_dict[\"md5\"] = np.array(self.md5)", "def data_dict_update(self, change):\n self.data_dict = change['value']", "def addToExtra(self,key,val):\n if self.extra == None: \n self.extra = {} \n self.extra[key] = val", "def update_depending(self, new_depending_values):\n self._check_missing_fields(set(self._depending_field_names), set(new_depending_values.keys()))\n\n for field in self._depending_field_names:\n self._depending_fields[field] = new_depending_values[field]", "def add_package(dictionary: dict, string_label: str) -> dict:\n if dictionary:\n for ii_key in dictionary.keys():\n dictionary[ii_key][\"package\"] = string_label\n else:\n print(\"Warning - empty dictionary was passed to be appended.\")\n return dictionary", "def add_data(self, in_data):\n old_data = {}\n for field in self.fields:\n # ToDo - might be a better way to determine the fieldname\n if field in in_data:\n if field in self.data:\n old_data = dict(self.data)\n self.data = {}\n\n self.data[field] = in_data[field]\n self.data['usUnits'] = in_data['usUnits']\n self.data['dateTime'] = in_data['dateTime']\n return old_data", "def append_data(dic,key,value):\n if(dic.has_key(key)):\n dic[key].append(value)\n else:\n dic[key] = [value]\n return dic", "def add_to_dict ( self, key_0, key_1, list_2 ):\n\n old_value_0 = self._dd_dict.get( key_0 ) # value for key 0\n if old_value_0 is None:\n self._dd_dict[ key_0 ] = { key_1: list_2 }\n else:\n # need to merge a value into old_value_0 which is a dict in the self....\n old_value_0[ key_1 ] = list_2\n\n self.print_dd_dict()\n\n\n print( self._dd_dict )\n # could return but it is a mutate\n return", "def add_data(self, op):\n self.__data += [AssemblerVariable(op)]\n self.refresh_name_label()\n self.refresh_name_end_label()", "def add(self, name, variable_classification):\r\n if name in self.__table__:\r\n logger.error(\"Tried to add duplicate key : \" , name)\r\n \r\n if isinstance(variable_classification, Global_Variable):\r\n self.__table__[name] = variable_classification\r\n elif isinstance(variable_classification, Local_Variable):\r\n self.__table__[name] = variable_classification\r\n elif isinstance(variable_classification, Procedure_Variable):\r\n self.__table__[name] = variable_classification\r\n elif isinstance(variable_classification, Constant_Variable):\r\n self.__table__[name] = variable_classification\r\n elif isinstance(variable_classification, Function_Variable):\r\n self.__table__[name] = variable_classification\r\n\r\n else:\r\n raise RuntimeError(\"Attempt to add bad value to symbol table\")", "def _add_attr_requirement(self, mother_element: 
GraphElement,\n daughter_element: GraphElement) -> None:\n if not daughter_element in self.attr_requirements:\n self.attr_requirements[daughter_element] = {}\n requirement_num = len(self.attr_requirements[daughter_element])\n requirement_name = f'arg{requirement_num}'\n self.attr_requirements[daughter_element][requirement_name] = mother_element", "def __iadd__(self, other):\n if not isinstance(other, dict):\n msg = 'Can not concatenate Dict and {}'.format(type(other))\n raise TypeError(msg)\n for key, val in other.items():\n if key in self:\n self._append_key(key, val)\n else:\n self[key] = val\n return self", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def add_attributes(data, **kwargs):\n for key in kwargs:\n data[key] = kwargs[key]", "def AddState(self, **dic):\n\n state = State()\n state.name = dic['name']\n state.external_name = dic['external']\n\n state_transitions = []\n\n for (condition, destination) in dic['transitions']:\n transition = Transition(condition, state.name, destination)\n state_transitions.append(transition)\n\n self.transitions.extend(state_transitions)\n state.transitions = state_transitions\n self.states[state.name] = state", "def setup(self):\n\n for name, infos in Rt.geom_dict.items():\n if name in Rt.optim_var_dict:\n self.add_input(name, val=infos[1][0])", "async def add_dict(self, dic):\n for key in dic:\n await self.set(key, dic[key])", "def add_variables(ds, variables_dict, dim_sizes_dict):\n\n du = DatasetUtil()\n\n for variable_name in variables_dict.keys():\n\n variable_attrs = variables_dict[variable_name]\n\n # Check variable definition\n TemplateUtil._check_variable_definition(variable_name, variable_attrs)\n\n # Unpack variable attributes\n dtype = variable_attrs[\"dtype\"]\n dim_names = variable_attrs[\"dim\"]\n attributes = deepcopy(variable_attrs[\"attributes\"]) if \"attributes\" in variable_attrs else None\n\n # Determine variable shape from dims\n try:\n dim_sizes = TemplateUtil._return_variable_shape(dim_names, dim_sizes_dict)\n except KeyError:\n raise KeyError(\"Dim Name Error - Variable \" + variable_name + \" defined with dim not in dim_sizes_dict\")\n\n # Create variable and add to dataset\n if dtype == \"flag\":\n flag_meanings = attributes.pop(\"flag_meanings\")\n variable = du.create_flags_variable(dim_sizes, meanings=flag_meanings,\n dim_names=dim_names, attributes=attributes)\n\n else:\n variable = du.create_variable(dim_sizes, dim_names=dim_names,\n dtype=dtype, attributes=attributes)\n\n if \"encoding\" in variable_attrs:\n du.add_encoding(variable, **variable_attrs[\"encoding\"])\n\n ds[variable_name] = variable\n\n return ds", "def __add__(self, other):\n var_set = set(self.variables.keys()) | set(other.variables.keys())\n result = {}\n for v in var_set:\n a, b = self.variables.get(v, 0), other.variables.get(v, 0)\n a, b = self._broadcast(a, b)\n result[v] = a + b\n return MultivariateDerivative(result)", "def data_append(ctx, data, key, value):\n assert isinstance(ctx, Wtp)\n assert isinstance(data, dict)\n assert isinstance(key, str)\n\n if key in str_keys:\n assert isinstance(value, str)\n elif key in dict_keys:\n assert isinstance(value, dict)\n if key == \"tags\":\n if value == \"\":\n return\n lst = data.get(key, [])\n lst.append(value)\n data[key] = lst", "def add_requirements(self, fgraph):\r\n pass", "def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = 
pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)", "def auxiliary(dict_):\n dict_['AUX'] = {}\n if dict_['DIST']['coeff'] == [0.0] * len(dict_['DIST']['coeff']):\n is_deterministic = True\n else:\n is_deterministic = False\n\n for key_ in ['UNTREATED', 'TREATED', 'COST', 'DIST']:\n if key_ in ['UNTREATED', 'TREATED', 'COST']:\n dict_[key_]['all'] = dict_[key_]['coeff']\n dict_[key_]['all'] = np.array(dict_[key_]['all'])\n else:\n dict_[key_]['all'] = dict_[key_]['coeff']\n dict_[key_]['all'] = np.array(dict_[key_]['all'])\n\n # Number of covariates\n num_covars_out = len(dict_['TREATED']['all'])\n num_covars_cost = len(dict_['COST']['all'])\n\n dict_['AUX']['num_covars_out'] = num_covars_out\n dict_['AUX']['num_covars_cost'] = num_covars_cost\n\n # Number of parameters\n dict_['AUX']['num_paras'] = 2 * num_covars_out + num_covars_cost + 2 + 2\n\n # Starting values\n dict_['AUX']['init_values'] = []\n\n for key_ in ['TREATED', 'UNTREATED', 'COST', 'DIST']:\n dict_['AUX']['init_values'] += dict_[key_]['all'].tolist()\n\n for j in sorted(dict_[key_].keys()):\n if j in ['all', 'types']:\n pass\n else:\n del dict_[key_][j]\n dict_['DETERMINISTIC'] = is_deterministic\n dict_ = check_types(dict_)\n\n return dict_", "def add_data(g):\n param = Parameters(g)\n\n # Permeability\n param.set_tensor(\"flow\", tensor.SecondOrderTensor(g.dim, np.ones(g.num_cells)))\n\n # Source term\n source = np.array([rhs(*pt) for pt in g.cell_centers.T])\n param.set_source(\"flow\", g.cell_volumes * source)\n\n # Boundaries\n bound_faces = g.tags[\"domain_boundary_faces\"].nonzero()[0]\n bound_face_centers = g.face_centers[:, bound_faces]\n\n labels = np.array([\"dir\"] * bound_faces.size)\n\n bc_val = np.zeros(g.num_faces)\n bc_val[bound_faces] = np.array([solution(*pt) for pt in bound_face_centers.T])\n\n param.set_bc(\"flow\", BoundaryCondition(g, bound_faces, labels))\n param.set_bc_val(\"flow\", bc_val)\n\n return {\"param\": param}", "def add_to_dict(param_dict):\n ### Sample - Int\n sample_s = param_dict['ml_args'].sample_s\n ### Sample - Mr\n sample_Mr = param_dict['ml_args'].sample_Mr\n ## Sample volume\n # Units (Mpc/h)**3\n volume_sample = { '18': 37820 / 0.01396,\n '19': 6046016.60311 ,\n '20': 2.40481e7 ,\n '21': 8.79151e7 }\n vol_mr = volume_sample[sample_s]\n ##\n ## Choice of Centrals and Satellites\n cens = int(1)\n sats = int(0)\n ## Other constants\n # Speed of light - In km/s\n speed_c = ac.c.to(u.km/u.s).value\n ## Number of CPU's to use\n cpu_number = int(cpu_count() * param_dict['cpu_frac'])\n ##\n ## Plotting constants\n plot_dict = { 'size_label':23,\n 'size_title':25,\n 'color_ham' :'red',\n 'color_dyn' :'blue'}\n ##\n ## Catalogue Prefix string\n catl_str_fig = param_dict['ml_args'].catl_alg_comp_fig_str()\n ##\n ## Saving to `param_dict`\n param_dict['sample_s' ] = sample_s\n param_dict['sample_Mr' ] = 
sample_Mr\n param_dict['vol_mr' ] = vol_mr\n param_dict['cens' ] = cens\n param_dict['sats' ] = sats\n param_dict['speed_c' ] = speed_c\n param_dict['cpu_number' ] = cpu_number\n param_dict['plot_dict' ] = plot_dict\n param_dict['catl_str_fig'] = catl_str_fig\n\n return param_dict", "def add_dyn(model, data):\n\n if model == 'GENCLS':\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n else:\n raise KeyError\n # todo: check xl\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'Sn': Settings.SynStore[busidx]['Sn'],\n 'Vn': Settings.SynStore[busidx]['Vn'],\n 'type': 2,\n 'xd1': Settings.SynStore[busidx]['xs'],\n 'ra': Settings.SynStore[busidx]['ra'],\n 'M': 2 * data[0],\n 'D': data[1],\n 'xl': 0, # TODO: retrieve `xl` from raw file\n 'status': 1, # TODO: retrieve `u` from raw file\n }\n\n psatlist = [busidx, param['Sn'], param['Vn'], Settings.freq, param['type'], param['xl'], param['ra'],\n param['xd1'], EMPTY, EMPTY, EMPTY, EMPTY, EMPTY,\n EMPTY, EMPTY, EMPTY, EMPTY, param['M'], param['D'],\n EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, param['status']]\n Settings.Syn.append(psatlist)\n Settings.DevicesAtBus[busidx].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'GENROU':\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SW.keys():\n dev = 'SW'\n gen_idx = busidx\n else:\n raise KeyError\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'Sn': Settings.SynStore[busidx]['Sn'],\n 'Vn': Settings.SynStore[busidx]['Vn'],\n 'ra': Settings.SynStore[busidx]['ra'],\n 'type': 6,\n 'Td10': data[0],\n 'Td20': data[1],\n 'Tq10': data[3],\n 'Tq20': data[4],\n 'M': 2 * data[4],\n 'D': data[5],\n 'xd': data[6],\n 'xq': data[7],\n 'xd1': data[8],\n 'xq1': data[9],\n 'xd2': data[10],\n 'xq2': data[10], # xd2 = xq2\n 'xl': data[11],\n 'status': 1\n }\n\n psatlist = [busidx, param['Sn'], param['Vn'], Settings.freq, param['type'], param['xl'], param['ra'],\n param['xd'], param['xd1'], param['xd2'], param['Td10'], param['Td20'], param['xq'],\n param['xq1'], param['xq2'], param['Tq10'], param['Tq20'], param['M'], param['D'],\n EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY, param['status']]\n Settings.Syn.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n # CONFIRM EXCITER DATA, NEED SOME CALCULATIONS\n elif model == 'EXST1':\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'extype': 1, # Type 1 (PSAT TYPE II Params List)\n 'MTC': data[0], # Measurement Time Constant T_r\n 'Vimax': data[1], # Voltage Integrator Output Max\n 'Vimin': data[2],\n 'Tc': data[3], # AVR Lead Time Constant Tc\n 'Tb': data[4], # AVR Lag\n 'excGain': data[5], # K_a(Exciter Gain)\n 'regGain': EMPTY,\n 'Ta': data[6], # Voltage regulator time constant\n 'maxregV': data[7],\n 'minregV': data[8],\n 'Kc': data[9], # Rectifier Loading factor\n 'Kf': data[10], # Field Voltage Feedback Gain(Stabilizer Gain)\n 'Tf': data[11], # Field Voltage Time Constant(Stabilizer Time Constant\n '1stX': data[3], # 1st Pole(REGULATOR?)\n '1st0': data[4], # 1st Zero(REGULATOR?)\n '2ndX': EMPTY,\n '2nd0': EMPTY,\n 'FCTC': EMPTY, # 
Field Circuit Time Constant T_e\n '1stCC': EMPTY, # 1st Ceiling Coefficient\n '2ndCC': EMPTY,\n 'status': 1\n }\n psatlist = [gen_idx, param['extype'], param['maxregV'], param['minregV'], param['excGain'],\n param['Ta'], param['Kf'], param['Tf'], EMPTY, param['FCTC'],\n param['MTC'], param['1stCC'], param['2ndCC'], param['status'], 0]\n\n Settings.Exc.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'IEEEX1':\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'extype': 1, # Type 1\n 'MTC': data[0], # Measurement Time Constant T_r\n 'excGain': data[1], # KA\n 'regGain': EMPTY,\n 'Ta': data[2], # Exciter Time Constant\n 'Tb': data[3], # AVR Lag Time Constant\n 'Tc': data[4], # AVR Lead Time Constant\n 'maxregV': data[5],\n 'minregV': data[6],\n 'Ke': data[7], # Field Circuit Integral Deviation\n 'FCTC': data[8], # Field Circuit Time Constant T_e\n 'Kf': data[9], # Rate Feedback Gain(PSAT Stabilizer Gain\n 'Tf1': data[10], # Stabilizer Time Constant\n 'Switch': 0,\n 'E1': data[12], # Exciter Flux at Knee Curve(Saturation Voltage Point 1)\n 'SE_E1': data[13], # Saturation Factor\n 'E2': data[14],\n 'SE_E2': data[15],\n '1stX': EMPTY, # 1st Pole(REGULATOR?)\n '1st0': EMPTY, # 1st Zero(REGULATOR?)\n '2ndX': EMPTY,\n '2nd0': EMPTY,\n '1stCC': EMPTY, # 1st Ceiling Coefficient(PSAT Ae..COMPUTE)\n '2ndCC': EMPTY, # 2nd Ceiling Coefficient(PSAT Be..COMPUTE)\n 'status': 1\n }\n psatlist = [gen_idx, param['extype'], param['maxregV'], param['minregV'], param['regGain'],\n param['Tc'], param['Tb'], param['2ndX'], param['2nd0'], param['FCTC'],\n param['MTC'], param['1stCC'], param['2ndCC'], param['status'], 0]\n\n Settings.Exc.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'ESST3A':\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'extype': 3, # Type (Closest Brushless Type AC1A)\n 'MTC': data[0], # Measurement Time Constant T_r\n 'Vimax': data[1], # Voltage Integrator Output Max\n 'Vimin': data[2],\n 'Km': data[3],\n 'Tc': data[4], # Lead Time Constant\n 'Tb': data[5], # Lag Time Constant\n 'regGain': data[6],\n 'Ta': data[7], # Voltage regulator time constant\n 'maxregV': data[8],\n 'minregV': data[9],\n 'Kg': data[10],\n 'Kp': data[11], # Voltage Reg. Proport. Gain\n 'Ki': data[12], # Voltage Reg. Int. 
Gain\n 'Vbmax': data[13], # Voltage Base\n 'Kc': data[14], # Rectifier Loading factor\n 'xl': data[15], # leakage\n 'Vgmax': data[16], # ???\n 'theta_p': data[17], # ???\n 'Tm': data[18], # ???\n 'Vmmax': data[19], # ???\n 'Vmmin': data[20], # ???\n '1stX': EMPTY, # 1st Pole\n '1st0': EMPTY, # 1st Zero\n '2ndX': EMPTY,\n '2nd0': EMPTY,\n 'FCTC': EMPTY, # Field Circuit Time Constant\n '1stCC': EMPTY, # 1st Ceiling Coefficient\n '2ndCC': EMPTY,\n 'status': 1\n }\n psatlist = [gen_idx, param['extype'], param['maxregV'], param['minregV'], param['regGain'],\n param['Tc'], param['Tb'], param['2ndX'], param['Ta'], param['FCTC'],\n param['MTC'], param['1stCC'], param['2ndCC'], param['status'], 0]\n\n Settings.Exc.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'ESST4B':\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'extype': 4, # Type ST4B\n 'maxregV': data[3],\n 'minregV': data[4],\n 'Ta': data[5], # Voltage regulator time constant(Bridge)\n 'Kpm': data[6], # ???\n 'Kim': data[7], # ???\n 'Vmmax': data[8], # ???\n 'Vmmin': data[9],\n 'Kg': data[10], # ???\n 'Kp': data[11], # Voltage Reg. Proport. Gain\n 'Ki': data[12], # Voltage Reg. Integ. Gain\n 'Vbmax': data[13], # Voltage Base\n 'Kc': data[14], # Rectifier Loading factor\n 'xl': data[15], # leakage\n 'theta_p': data[16], # ???\n 'regGain': EMPTY, # ADD UP K'S Gain??\n '1stX': EMPTY, # 1st Pole\n '1st0': EMPTY, # 1st Zero\n '2ndX': EMPTY,\n '2nd0': EMPTY,\n 'FCTC': EMPTY, # Field Circuit Time Constant\n 'MTC': EMPTY, # Measurement Time Constant\n '1stCC': EMPTY, # 1st Ceiling Coefficient\n '2ndCC': EMPTY,\n 'status': 1\n }\n psatlist = [gen_idx, param['extype'], param['maxregV'], param['minregV'], param['regGain'],\n param['1stX'], param['1st0'], param['2ndX'], param['2nd0'], param['FCTC'],\n param['MTC'], param['1stCC'], param['2ndCC'], param['status'], 0]\n\n Settings.Exc.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'IEEEG1':\n Settings.govcount += 1\n busidx = data[0]\n id = data[2]\n data = data[5:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'type': 1,\n 'ref_speed': EMPTY,\n 'Droop': EMPTY,\n 'maxTO': EMPTY,\n 'minTO': EMPTY,\n 'govTC': EMPTY,\n 'servoTC': EMPTY,\n 'tgTC': EMPTY, # Transient Gain Time Constant\n 'pfTC': EMPTY, # Power Fraction Time Constant\n 'rTC': EMPTY, # Reheat Time Constant\n 'status': 1\n }\n psatlist = [busidx, param['type'], param['ref_speed'], param['Droop'], param['maxTO'], param['minTO'],\n param['govTC'], param['servoTC'], param['tgTC'], param['pfTC'], param['rTC'], param['status']]\n\n Settings.Tg.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'IEE2ST':\n\n Settings.pss2count += 1\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'type': 1,\n 'AVR': Settings.pss2count,\n 'PSSmodel': 1,\n 
'PSSin': 1,\n 'Vmaxsout': EMPTY,\n 'Vminsout': EMPTY,\n 'Kw': EMPTY, # Stabilizer Gain\n 'Tw': EMPTY, # Washout Time\n 'T1': data[2], # 1st Lead\n 'T2': data[3], # 1st Lag\n 'T3': data[4], # 2nd\n 'T4': data[5],\n 'T5': data[6], # 3rd\n 'T6': data[7],\n 'T7': data[8], # Filter Lead Time Constant\n 'T8': data[9], # Filter Lag\n 'T9': data[10], # Freq Branch Time Constant\n 'T10': data[11], # Power Branch Time Constant\n 'Lsmax': data[12],\n 'Lsmin': data[13],\n 'Vcu': data[14],\n 'Vcl': data[15],\n 'Ka': EMPTY, # Gain for additional signal\n 'Ta': EMPTY, # Time constant for additional signal\n 'Kp': EMPTY, # Gain for active power\n 'Kv': EMPTY, # Gain for bus voltage magnitude\n 'Vamax': EMPTY, # additonal signal(anti-windup)\n 'Vamin': EMPTY, # additional signal(windup)\n 'Vsmax': EMPTY, # max output(no additonal)\n 'Vsmin': EMPTY, # min output(no additional)\n 'FVthresh': EMPTY,\n 'RSthresh': EMPTY,\n 'switch': 1,\n 'status': 1,\n }\n\n psatlist = [param['AVR'], param['PSSmodel'], param['PSSin'], param['Vsmax'], param['Vsmin'], param['Kw'],\n param['Tw'], param['T1'], param['T2'], param['T3'], param['T4'], param['Ka'], param['Ta'],\n param['Kp'], param['Kv'], param['Vamax'], param['Vamin'], param['Vsmax'], param['Vsmin'],\n param['FVthresh'], param['RSthresh'], param['switch'], param['status']]\n\n Settings.Pss.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'IEEEST':\n Settings.pss1count += 1\n busidx = data[0]\n id = data[2]\n data = data[3:]\n if busidx in Settings.SynStore.keys():\n dev = 'PV'\n gen_idx = busidx\n\n elif busidx in Settings.SWStore.keys():\n dev = 'SW'\n gen_idx = busidx\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'gen': gen_idx,\n 'type': 0,\n 'AVR': Settings.pss1count,\n 'PSSmodel': 1,\n 'PSSin': 1,\n 'Vmaxsout': EMPTY,\n 'Vminsout': EMPTY,\n 'Kw': EMPTY, # Stabilizer Gain\n 'Tw': EMPTY, # Washout Time\n 'A1': data[0],\n 'A2': data[1],\n 'A3': data[2],\n 'A4': data[3],\n 'A5': data[4],\n 'A6': data[5],\n 'T1': data[6],\n 'T2': data[7],\n 'T3': data[8],\n 'T4': data[9],\n 'T5': data[10],\n 'T6': data[11],\n 'Ks': data[12],\n 'Lsmax': data[13],\n 'Lsmin': data[14],\n 'Vcu': data[15],\n 'Vcl': data[16],\n 'Ka': EMPTY, # Gain for additional signal\n 'Ta': EMPTY, # Time constant for additional signal\n 'Kp': EMPTY, # Gain for active power\n 'Kv': EMPTY, # Gain for bus voltage magnitude\n 'Vamax': EMPTY, # additonal signal(anti-windup)\n 'Vamin': EMPTY, # additional signal(windup)\n 'Vsmax': EMPTY, # max output(no additonal)\n 'Vsmin': EMPTY, # min output(no additional)\n 'FVthresh': EMPTY,\n 'RSthresh': EMPTY,\n 'switch': 1,\n 'status': 1,\n }\n\n psatlist = [param['AVR'], param['PSSmodel'], param['PSSin'], param['Vsmax'], param['Vsmin'], param['Kw'],\n param['Tw'], param['T1'], param['T2'], param['T3'], param['T4'], param['Ka'], param['Ta'],\n param['Kp'], param['Kv'], param['Vamax'], param['Vamin'], param['Vsmax'], param['Vsmin'],\n param['FVthresh'], param['RSthresh'], param['switch'], param['status']]\n\n Settings.Pss.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'DFIG':\n busidx = data[0]\n id = 1 #Id number defaults to 1 for Wind Devices for now\n data = data[2:]\n if busidx in Settings.BusStore.keys():\n pass\n\n else:\n raise KeyError\n\n param = {'bus': busidx,\n 'speednum': data[0],\n 'Sn': EMPTY,\n 'Vn': 20,\n 'freq': 60,\n 'Rs': EMPTY,\n 'Xs': data[1],\n 'Rr': EMPTY,\n 'Xr': data[4],\n 'Xu': EMPTY,\n 'Hm': data[2],\n 
'Kp': EMPTY,\n 'Tp': EMPTY,\n 'Kv': EMPTY,\n 'Teps': EMPTY,\n 'R': EMPTY,\n 'npoles': EMPTY,\n 'nb': EMPTY,\n 'nGB': EMPTY,\n 'pmax': EMPTY,\n 'pmin': EMPTY,\n 'qmax': EMPTY,\n 'qmin': EMPTY,\n 'status': data[0],\n }\n\n psatlist = [busidx, param['speednum'], param['Sn'], param['Vn'], param['freq'], param['Rs'],\n param['Xs'], param['Rr'], param['Xr'], param['Xu'], param['Hm'], param['Kp'],\n param['Tp'], param['Kv'], param['Teps'], param['R'], param['npoles'], param['nb'],\n param['nGB'], param['pmax'], param['pmin'], param['qmin'], param['qmin'], param['status']]\n\n Settings.Dfig.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n\n elif model == 'WTE':\n # Type 1 and 2\n busidx = data[0]\n id = 1\n data = data[2:]\n if busidx in Settings.BusStore.keys():\n pass\n\n else:\n raise KeyError\n\n param = {'model': data[0],\n 'nomspeed': 13,\n 'airdens': 1.225,\n 'tau': 4,\n 'delT': 0.1,\n 'c': 20,\n 'k': 2,\n 'Tsr': 5,\n 'Ter': 15,\n 'Vwr': 0,\n 'Tsg': 5,\n 'Teg': 15,\n 'Vwg': 0,\n 'Z0': 0.01,\n 'fstep': 0.2,\n 'nharm': 50\n }\n psatlist = [param['model'], param['nomspeed'], param['airdens'], param['tau'],\n param['delT'], param['c'], param['k'], param['Tsr'], param['Ter'], param['Vwr'],\n Settings.WindStore[busidx]['H'], param['Tsg'], param['Teg'], param['Vwg'], param['Z0'],\n param['fstep'], param['nharm']]\n\n Settings.WindStore[busidx].update(param)\n Settings.Wind.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n elif model == 'WTT':\n # Type 1 and 2\n busidx = data[0]\n id = 1\n data = data[3:]\n if busidx in Settings.BusStore.keys():\n pass\n\n else:\n raise KeyError\n\n param = {'H': data[0],\n 'Damp': EMPTY,\n 'Htf': EMPTY,\n 'Freq1': EMPTY,\n 'Dshaft': EMPTY\n }\n psatlist = [param['H'], param['Damp'], param['Htf'], param['Freq1'], param['Dshaft']]\n Settings.WindStore[busidx] = param\n Settings.Wind.append(psatlist)\n Settings.DevicesAtBus[model.lower()].append({'Bus': busidx , 'Id' : id})\n\n elif model == 'WT4G1':\n # Type 4 Wind generator\n busidx = data[0]\n psatlist = [busidx, 1, 100, 100, 60, 1, 0.1, 0.1, 0.1, 0.1,\n 3, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 0, 1, 1]\n Settings.Dfig.append(psatlist)\n else:\n logging.warning('Skipping unsupported mode <{}> on bus {}'.format(model, data[0]))", "def _add_info(self, infos: dict, info: dict, env_num: int) -> dict:\n for k in info.keys():\n if k not in infos:\n info_array, array_mask = self._init_info_arrays(type(info[k]))\n else:\n info_array, array_mask = infos[k], infos[f\"_{k}\"]\n\n info_array[env_num], array_mask[env_num] = info[k], True\n infos[k], infos[f\"_{k}\"] = info_array, array_mask\n return infos", "def _add_record(days_dict, record, key):\n days_dict[key] = {\n \"Name\": record[\"title\"],\n \"Owner\": record[\"owner\"],\n \"Severity\": record[\"severity\"],\n \"Created\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"createdAt\"] / 1000.0))),\n }\n if \"endDate\" in record:\n days_dict[key].update(\n {\n \"Closed\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"endDate\"] / 1000.0),)),\n \"Resolution\": record[\"resolutionStatus\"],\n }\n )", "def _add_dict_values(self, d1, d2):\n\n if d1 is None and d2 is None:\n return None\n\n d1 = d1 or {}\n d2 = d2 or {}\n\n added = {}\n for key in set(list(d1.keys()) + list(d2.keys())):\n added[key] = dict(d1.get(key, {}), **(d2.get(key, {})))\n return added", "def _append_params(name, param_values, dependency, index, sample):\n\n # Fit distribution\n current_params 
= Fit._fit_distribution(sample, name)\n\n # Create basic fit object\n basic_fit = BasicFit(*current_params, sample)\n\n for i in range(index, len(dependency)):\n # Check if there is a dependency and whether it is the right one\n if dependency[i] is not None and \\\n dependency[i] == dependency[index]:\n # Calculated parameter is appended to param_values\n param_values[i].append(current_params[i])\n return basic_fit", "def add(self, variables):\n if not isinstance(variables, (list, tuple)):\n variables = [variables]\n\n for v in variables:\n if isinstance(v, Variable):\n self.list.append(v)\n elif isinstance(v, dict):\n self.list.append(Variable.create(**v))\n else:\n raise NotImplementedError\n\n self.generate_from_halton()\n for v in self.list:\n if (\n any(e in v.kind.lower() for e in EXCLUDE_FROM_HALTON)\n and v.__class__ != OutputVariable\n ):\n v.generate_values()\n\n for v in self.list:\n if v.__class__ == OutputVariable:\n dep = [v if isinstance(v, str) else v[\"name\"] for v in v.dependent]\n ind = [\n i\n for i in self.list\n if i.__class__ == IndependentVariable and i.name in dep\n ]\n if ind:\n v.resolve_dependent(ind)", "def add_entry(source,lbs):\n\tnow = datetime.now()\n\tdate = now.strftime('%m-%d-%Y')\n\tdata = {date: {'Date': date, 'Weight': lbs}}\n\tsource.inject(data)", "def _load_depfile(casename=None):\n\n data={}\n \n if casename==None:\n print('_load_depfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_dep.dat','r')\n except IOError:\n print('_load_depfile: invalid case name.')\n return data\n\n dep_str=fp.readline().split('=')\n dep_num=int(dep_str[1])\n t_data1=np.genfromtxt(casename+'_dep.dat',skip_header=1)\n fp.close()\n\n data['dep_num']=dep_num\n data['x']=t_data1[:,0]\n data['y']=t_data1[:,1]\n data['h']=t_data1[:,2]\n data['nodexy']=t_data1[:,0:2]\n \n return data", "def add_datamodel(self, dm):\n\n assert isinstance(dm, self.base_model), 'value must be a {0}'.format(self.base_name)\n\n self[dm.release] = dm", "def _update_dep_from_data(cls, dep, data: dict) -> None:\n if not dep.description:\n dep.description = data['summary']\n\n if not dep.authors:\n dep.authors = []\n if data['author']:\n dep.authors.append(Author(\n name=data['author'],\n mail=data.get('author_email') or None,\n ))\n if data['maintainer']:\n dep.authors.append(Author(\n name=data['maintainer'],\n mail=data.get('maintainer_email') or None,\n ))\n dep.authors = tuple(dep.authors)\n\n if not dep.links:\n if data['project_urls']:\n dep.links = {k.lower(): v for k, v in data['project_urls'].items()}\n if data['package_url'] and data['package_url'] not in dep.links.values():\n dep.links['package'] = data['package_url']\n if data['project_url'] and data['project_url'] not in dep.links.values():\n dep.links['project'] = data['project_url']\n\n if not dep.classifiers:\n dep.classifiers = tuple(data['classifiers'])\n\n if not dep.license:\n dep.license = cls._get_license(data)", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def add(self, key, value):\n self.data.append((key, value))", "def add_variable(self, name, domain):\n self.variables.append(name)\n self.domains[name] = list(domain)\n self.constraints[name] = {}", "def get_data_dict(params, x):\n parameters = {}\n for i, 
p in enumerate(feature_map.ordered_parameters):\n parameters[p] = x[i]\n for i, p in enumerate(var_form.ordered_parameters):\n parameters[p] = params[i]\n return parameters", "def add_donation(donor, donation_amount, donor_dict):\n donor_dict.setdefault(donor, []).append(donation_amount)", "def build_dict(arg):\n # helper function to the Evaluator.to_property_di_graph() method that\n # packages the dictionaries returned by the \"associate_\" family of\n # functions and then supplies the master dict (one_dict) to the Vertex\n # obj as **kwargs\n one_dict = {}\n for ar in arg:\n one_dict.update(ar)\n return one_dict", "def add_vars(size):\n return {'regu_c': cvx.Variable(shape=size, name='regu_c'),\n 'regd_c': cvx.Variable(shape=size, name='regd_c'),\n 'regu_d': cvx.Variable(shape=size, name='regu_d'),\n 'regd_d': cvx.Variable(shape=size, name='regd_d')}", "def build_param_and_data_dict(self, s_gen, xr, yr, r):\n # Note it is important to create a new dictionary here so that\n # we reset the data dict after generating new data\n self.data = {\n 'DT': self.dt,\n 'motion_prior': self.motion_prior,\n 'motion_gen': self.motion_gen,\n 'ds': self.ds,\n 'de': self.de,\n 'L0': self.l0,\n 'L1': self.l1,\n 'GAMMA': self.gamma,\n 'lamb': self.lamb,\n 'fista_c': self.fista_c,\n 'D': self.tc.t_D.get_value(),\n 'N_L': self.n_l,\n 'N_T': self.n_t,\n 'L_I': self.l_i,\n 'L_N': self.l_n,\n 'N_g_itr': self.n_g_itr,\n 'N_itr': self.n_itr,\n 'N_P': self.n_p,\n 'XS': self.tc.t_XS.get_value(),\n 'YS': self.tc.t_YS.get_value(),\n 'XE': self.tc.t_XE.get_value(),\n 'YE': self.tc.t_YE.get_value(),\n 'Var': self.tc.t_Var.get_value(),\n 'G': self.tc.t_G.get_value(),\n 'tau': self.tau,\n 'XR': xr, 'YR': yr,\n 'IE': self.tc.t_IE.get_value(),\n 'S_gen': s_gen,\n 'S_gen_name': self.s_gen_name,\n 'R': r,\n 'Ips': self.Ips,\n 'FP': self.FP,\n 'quad_reg': self.quad_reg,\n 'quad_reg_mean': self.quad_reg_mean,\n 'drop_prob': self.drop_prob,\n 's_range': self.s_range,\n }", "def add_to_dict_fail( self, key_0, key_1, list_2 ):\n dict_1_new = { key_1: list_2 }\n\n\n dict_1_current = self._dd_dict.get( key_1 )\n if dict_1_current is None:\n #dict_1_new = dict_1_new\n pass\n\n else:\n dict_1_new = {**dict_1_current, **dict_1_new }\n\n\n dict_0_new = { key_0: dict_1_new } # maybe a merge ??\n\n dict_0_current = self._dd_dict.get( key_0 )\n\n if dict_0_current is None:\n dict_0_new = dict_0_new\n\n else:\n dict_0_new = {**dict_0_current, **dict_0_new }\n\n dict_0_new = {**dict_0_current, **dict_0_new }\n\n self._dd_dict = { **self._dd_dict, **dict_0_new }\n\n print( self._dd_dict )\n\n return self._dd_dict", "def update_labels(self,label_dict):\n \t\tfor key in self.deps:\n \t\t\tfor dependent in self.deps[key]:\n \t\t\t\tlabel = dependent[1]\n \t\t\t\tlabel_dict[label] = label_dict.get(label,0) + 1\n \t\treturn label_dict", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def add_data_path(self, path_key: str, path_name: str):\n self.add_data_paths({path_key: path_name})", "def addInvariants(invar1, invar2):\n invar_sum= {}\n for key in invar1.keys():\n invar_sum[key] = np.array(np.add(invar1[key], invar2[key]))\n \n return(invar_sum)", "def addDependency(self, mods):\n for modname in mods:\n # if one adds a dependency that is found in optional modules\n # change it from optional to required\n if( modname in self.optmodules ):\n self.buildWithout([modname])\n\n if( (not modname in self.reqmodules) and \\\n (not modname in self.reqmodules_buildonly) and \\\n (not modname in 
self.reqmodules_external) and \\\n self.name != modname ):\n self.reqmodules.append(modname)", "def add_other_meta_data(self, other: _MetaData) -> None:\n\n for key in other._meta_data_dict.keys():\n self.add_data(key, other._meta_data_dict[key])", "def _add_related(related, dep, all_related, index, connector=None):\n doc = {}\n doc[\"relationForm\"] = dep\n doc[\"rawName\"] = related\n doc[\"tokenIndex\"] = int(index)\n doc[\"offsetStart\"] = A.lookup[int(index)][\"start\"]\n doc[\"offsetEnd\"] = A.lookup[int(index)][\"end\"]\n doc[\"connector\"] = \"\" if connector is None else connector\n if not doc in all_related:\n all_related.append(doc)\n return all_related", "def add_dict(dest, src):\n for key in src.keys():\n if key in dest.keys():\n dest[key] += src[key]\n else:\n dest[key] = src[key]", "def import_step_depends_from_dict(self, workflow_dict, step_name2id):\n for step in workflow_dict['steps'].values():\n depend_list = step['depend']\n if not depend_list:\n depend_list = ['root']\n for depend in depend_list:\n parent_id = depend if depend == 'root' else step_name2id[depend]\n if not self.add_depend({\n 'child_id': step['step_id'],\n 'parent_id': parent_id\n }):\n Log.an().error(\n 'cannot add step dependency (parent->step): %s->%s',\n depend, step['name']\n )\n return False\n\n return True", "def add_file(self, key, dict, data):\n try:\n # If new file (aka, data passed in, write file)\n path = os.path.join(self.file_path, '%s.xoj' % key)\n f = open( path, 'w' )\n f.write(data)\n f.close()\n\n self.fileList[key] = dict\n except:\n print \"Error writing file\", path\n\n self.save()", "def add(self, optim=None, more_data=[]):\r\n raise NotImplementedError()", "def add(self, optim=None, more_data=[]):\r\n raise NotImplementedError()", "def _update_default_dict(main, other):\r\n for k, v in other.items():\r\n main[k] += v", "def push(self, data: Dict[str, np.ndarray]) -> None:\n for key, value in data.items():\n self.data[key].extend(value)\n\n if self._keys is None:\n self._keys = list(self.data.keys())", "def set_additional_depot_info(self, depot_info):\n depot_config.add_addition_depot_into(depot_info) # pragma: no cover", "def add(self, key, value):", "def _add_data_to_model(self, qinfos):\n if len(qinfos) == 0:\n return\n new_points = np.empty((0, self.domain_dim))\n new_vals = np.empty(0)\n for i in range(len(qinfos)):\n new_points = np.concatenate((new_points,\n qinfos[i].point.reshape(-1, self.domain_dim)), axis=0)\n new_vals = np.append(new_vals, [qinfos[i].val], axis=0)\n if self.gp is not None:\n self.gp.add_data(new_points, new_vals)", "def _compute_new_data(self, desvars, responses, objectives, constraints, metadata):\n # type: (dict, dict, dict, dict, dict) -> np.ndarray\n raise NotImplementedError", "def addKnowledge(colname,knowns,load_dependencies,colnames):\n\td,n = dDict[colname],len(knowns.values()[0])\n\n\tprint 'adding '+colname\n\n\tdef args(i): return [knowns[arg][i] for arg in d.inputcols]\n\n\tknowns[colname]=[]\n\n\tnoskip = load_dependencies or colname in colnames\n\n\tif not noskip: print 'reading %s from table'%colname\n\n\tfor i in range(n):\n\t\tprint '%d/%d'%(i+1,n) ; sys.stdout.write(\"\\033[F\") # Cursor up one line\n\t\tif noskip:\n\t\t\tknowns[colname].append(d.func(*args(i)) if d.none or args(i).count(None) == 0 else None)\t\n\t\telse:\n\t\t\tknowns[colname].append(db.query1(colname,'fwid',knowns['fwid'][i]))\n\treturn knowns", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def add_dict_entry(dictionary: dict, 
key: Any, value: Any) -> None:\n try:\n dictionary[key].append(value)\n except KeyError:\n dictionary[key] = [value]", "def extend_inventory(self, datapath, variable_type='all', extra_pref=None, first_suffix=None):\n if extra_pref is None:\n extra_pref = self.extra_pref\n if first_suffix is None:\n first_suffix = self.first_suffix\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n \n if variable_type == 'predictors':\n self.predictor_inventory = {**self.predictor_inventory, **inventory}\n self.predictors = self.predictor_inventory.keys()\n elif variable_type == 'predictands':\n self.predictand_inventory = {**self.predictand_inventory, **inventory}\n self.predictands = self.predictand_inventory.keys()\n else:\n self.predictor_inventory = {**self.predictor_inventory, **inventory}\n self.predictors = self.predictor_inventory.keys()\n self.predictand_inventory = {**self.predictand_inventory, **inventory}\n self.predictands = self.predictand_inventory.keys()", "def add_variable(self, var):\n self.var_list.append(var)\n self.var_dict[var.name] = var", "def add_control_dep_mappings(\n input_map: dict[str, tf.Tensor]\n) -> dict[str, tf.Tensor]:\n return dict(\n **input_map,\n **{\n make_control_dependency(k): make_control_dependency(v.name)\n for k, v in input_map.items()\n if not is_control_dependency(k)\n },\n )", "def add_dicts(*args, **kwargs):\n result = {}\n for d in args:\n result.update(d)\n result.update(kwargs)\n return result" ]
[ "0.6675702", "0.64973605", "0.6014381", "0.60071164", "0.5916942", "0.58490884", "0.5743055", "0.57284117", "0.5649685", "0.5642036", "0.5622459", "0.5599008", "0.5532404", "0.55121297", "0.5506236", "0.55047566", "0.5473821", "0.5455027", "0.5451647", "0.5436743", "0.54222435", "0.53919524", "0.5380917", "0.53624856", "0.53478223", "0.5345641", "0.53385013", "0.53384936", "0.5333633", "0.53280425", "0.53227514", "0.5322515", "0.5316451", "0.53134745", "0.53129494", "0.52816486", "0.527445", "0.5270776", "0.5258987", "0.5257104", "0.5225449", "0.51956034", "0.5195148", "0.5187956", "0.5184918", "0.5181421", "0.5180024", "0.51796913", "0.5177265", "0.5170561", "0.51683104", "0.51628274", "0.51520133", "0.51502395", "0.51302254", "0.5107323", "0.5082145", "0.5067122", "0.5059143", "0.5053291", "0.5043819", "0.5033027", "0.502429", "0.50226945", "0.50196105", "0.50138867", "0.50129473", "0.5004144", "0.5003439", "0.5001357", "0.5000359", "0.49985215", "0.49968842", "0.4994092", "0.4987709", "0.49807918", "0.49764907", "0.49762234", "0.49712038", "0.49680755", "0.49667865", "0.4966006", "0.4959257", "0.495885", "0.49512547", "0.4951245", "0.4951245", "0.49459633", "0.49450874", "0.4944915", "0.49398237", "0.49373695", "0.493654", "0.4930638", "0.49300689", "0.49275833", "0.4925992", "0.49180862", "0.49174324", "0.49160665" ]
0.69769204
0
Add an array of values
def add_array(self, indep, keys, values): if np.ndim(values) > 1: values = orient(values, keys) dep = {k: v for k, v in zip(keys, values)} self.add_dict(indep, dep)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_values(self, *values, replace=False):\n\n if replace: self.reset_values()\n for value in values: self.values = np.append(self.values, value)", "def add_all(self, *values):\n for value in values:\n self.add(value)", "def _addToArray(self, num, arr):\r\n return [i + num for i in arr]", "def add_values(self, *values, replace=False):\n\n if replace: self.reset_values()\n for value in values: self.values += [tuple(value)]", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def add(self, value):\n self.arr.append(value)", "def _add_list_values(a, b):\n new_list = []\n for i in range(len(a)):\n new_list.append(a[i] + b[i])\n return new_list", "def add(a, b):\n return np.array([x + y for x, y in zip(a, b)])", "def __add__(self, vs):\n ret = self.__elements\n for v in map(tuple, vs):\n if v not in map(tuple, ret):\n ret.append(np.array(v))\n return ret", "def append(arr, values, axis=None):\n arr = asanyarray(arr)\n if axis is None:\n if arr.ndim != 1:\n arr = arr.ravel()\n values = ravel(values)\n axis = arr.ndim-1\n return concatenate((arr, values), axis=axis)", "def add_array(self, value):\n if isinstance(value, str):\n self._data += value.encode(\"utf-8\")\n elif isinstance(value, (bytes, bytearray)):\n self._data += value\n else:\n try:\n for element in value:\n self.add(element)\n except TypeError:\n raise Exception(\"value cannot be encoded as an array\")", "def sum_values(values):\n return (sum(values))", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [arr1[i] + arr2[i] for i in range(len(arr1))]", "def insert_arr(self, val_arr=[]):\n for x in val_arr:\n self.insert(x)", "def add(self, values, counts=None, max_skip_count=None):\n try:\n values = [float(values)]\n except Exception:\n pass\n\n # Returns numpy array, if possible:\n values = fast_flatten(values)\n\n if len(values) == 0:\n return\n\n if HAS_NUMPY:\n if counts is not None:\n counts = numpy.array(counts)\n self._add_via_numpy(values, counts)\n else:\n self._add_via_python(values, counts, max_skip_count)", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return (None)\n newList = []\n for i in range(len(arr1)):\n newList.append(arr1[i] + arr2[i])\n return (newList)", "def add_to_buffer(self, values):\n self._buffer.extend(values)", "def add_arrays(arr1, arr2):\n n = len(arr1)\n m = len(arr2)\n if n != m:\n return None\n return [arr1[i] + arr2[i] for i in range(n)]", "def add(self, value):", "def __add__(self, other):\n return asarray(add(self, other))", "def add(self,*datas):\n\t\tresult = sum(datas)\n\t\treturn result", "def add(self,v2): \n n = len(self.a)\n m = len(v2.a)\n c = []\n if n != m:\n print(\"Incompatible Types\")\n return\n\n for i in range(n):\n c.append(self.a[i]+v2.a[i])\n\n return c", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [sum(element_wise) for element_wise in zip(arr1, arr2)]", "def sum(cls, vectors):\n result = cls.null()\n for vector in vectors:\n result += vector\n return result", "def add(self, value):\n if self.squared:\n if isinstance(value, list):\n value = [np.square(x) for x in value]\n else:\n value = np.square(value)\n\n if isinstance(value, list):\n for i in range(0, len(value)):\n self.value[i] = np.multiply(self.decay, self.value[i]) + np.multiply((1. - self.decay), value[i])\n else:\n self.value = np.multiply(self.decay, self.value) + np.multiply((1. 
- self.decay), value)", "def __add__(self, right_arr):\n concat_arr = self.copy() # Create new instance to return\n concat_arr.extend(right_arr)\n return concat_arr", "def sum_values(self):\n raise NotImplementedError", "def add_lists(a,b):\r\n\r\n for i in range(len(a)):\r\n a[i] += b[i]\r\n return a", "def add_op(target_nodata, *array_list):\r\n result = numpy.zeros(array_list[0].shape, dtype=numpy.float32)\r\n valid_mask = numpy.zeros(result.shape, dtype=numpy.bool)\r\n for array in array_list:\r\n # nodata values will be < 0\r\n local_valid_mask = array >= 0\r\n valid_mask |= local_valid_mask\r\n result[local_valid_mask] += array[local_valid_mask]\r\n result[~valid_mask] = target_nodata\r\n return result", "def add(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n summation = str(ft.reduce(oper.add,values))\n return summation", "def _addVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] + X2[i] for i in range(len(X1))]", "def vec_add_scalar (x, c):\n return [x_i+c for x_i in x]", "def add(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],1.0]", "def add(self, value):\n ind = self._ind % self.shape[0]\n self._values[ind] = value\n self._ind += 1\n self._cached = False", "def __add__(self, other):\n return self + [other]", "def add(a, b):\n return [a[i] + b[i] for i in range(2)]", "def smart_add(*args):\n result = 0\n for item in args:\n result += item\n\n return result", "def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector", "def sum(self, values):\n return self.aggregate(values, \"sum\")", "def __add__(self, other):\n if len(self) != len(other):\n raise ValueError('As dimensões devem ser iguais')\n\n result = Vector(len(self)) # inicia um novo array do tamanho do próprio\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def add(self, *args):\n sum = 0\n for arg in args:\n sum += float(arg)\n return sum", "def __array_append(self, in_a,in_b):\n in_b = np.array([in_b]) if isinstance(in_b,(int,float,long,complex)) else in_b\n return np.concatenate((in_a,in_b))", "def __iadd__(self, other):\n if isinstance(other, Seq2):\n if len(self) == len(other):\n self._vectors = [a + b for a, b in zip(self, other)]\n return self\n else:\n raise ValueError(\"cannot add arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n self._vectors = [a + b for a in self]\n return self", "def sum_array(arr):\n sum = 0\n for num in arr:\n sum += num\n return sum", "def add(self, *items):", "def __add__(self, _v):\n\t\tif len(self) == len(_v):\n\t\t\tans = copy.deepcopy(self)\n\t\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] += _v[i]\n\t\t\treturn ans", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def add(self, value):\n pass", "def add_params(l, name, values):\n if type(values) != list:\n values = [values] * len(l)\n if len(l) != len(values):\n raise ValueError(\n \"Enequal length lists in add_params {} {}\".format(\n len(l), len(values)))\n for i, val in enumerate(values):\n l[i][name] = val\n return l", "def add_inputs(self, inputs):\n self.inputs += inputs", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def update(self, values: List[int]) -> None:\n ...", 
"def update(self, values: List[int]) -> None:\n ...", "def __add__(self, other):\n\t\tif len(self) != len(other):\n\t\t\traise ValueError('dimensions must agree')\n\t\tresult = Vector(len(self))\n\t\tfor j in range(len(self)):\n\t\t\tresult[j] = self[j] + other[j]\n\t\treturn result", "def evaluate(self, seq, begin, end, *args):\n return reduce(operator.add, args, [])", "def vectorAdd(a, b):\n return [a[i] + b[i] for i, j in enumerate(a)]", "def sum_elements(arr):\n return sum(arr)", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n\n matrix_sum = []\n for i, j in zip(arr1, arr2):\n matrix_sum.append(i + j)\n return matrix_sum", "def add4(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],a[3]+b[3]]", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def extend(self, i):\n for x in i:\n self.add(x)", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def __add__(self, other):\n if isinstance(other, Seq2):\n if len(self) == len(other):\n return other.from_points(\n a + b for a, b in zip(self, other))\n else:\n raise ValueError(\"cannot add arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a + b for a in self)", "def add(self,name,list,axes=''):\n array = np.array(list,float)\n self[name] = array\n self._axes[name] = axes", "def add(numbers):\n sum1 = 0\n for i in numbers:\n sum1 = sum1+i\n\n return sum1", "def push_many(self, values):\n self._tail_iters.append(iter(values))", "def add_nodes(self, *values):\n for value in values:\n self.add_node(value)", "def addAll(self, *args):\n pass", "def summed(*values):\n values = [_normalize(v) for v in values]\n for v in zip(*values):\n yield sum(v)", "def addInPlace(self, value1, value2):\n raise NotImplementedError", "def sum_extend(t, init):\n total = init\n for x in t:\n total.extend(x)\n return total", "def add_list_numbers(incoming_list: list):\n return sum(incoming_list)", "def sumValues(aList):\r\n sum = 0\r\n for d in aList:\r\n sum += d\r\n return sum", "def assignment_by_sum(x, values, indices, axis=0):\n x_new = copy(x)\n values = array(values)\n use_vectorization = hasattr(indices, \"__len__\") and len(indices) < ndim(x)\n if _is_boolean(indices):\n x_new[indices] += values\n return x_new\n zip_indices = _is_iterable(indices) and _is_iterable(indices[0])\n if zip_indices:\n indices = list(zip(*indices))\n if not use_vectorization:\n len_indices = len(indices) if _is_iterable(indices) else 1\n len_values = len(values) if _is_iterable(values) else 1\n if len_values > 1 and len_values != len_indices:\n raise ValueError(\"Either one value or as many values as indices\")\n x_new[indices] += values\n else:\n indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:]))\n x_new[indices] += values\n return x_new", "def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return tArray", "def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return tArray", "def add_value(trajectories, val_func):\n for trajectory in trajectories:\n observes = trajectory['observes']\n values = val_func.predict(observes)\n trajectory['values'] = values", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n 
assert s7.add(1.0, 2.0, 100.0) == 103.0", "def add(self, params):\n if len(params) < 2:\n return\n x = self.reg_dct[params[0]]\n y = self.reg_dct[params[1]]\n self.reg_dct[params[0]] = (x + y) % (2** 32)", "def append_value(self, value):\n self.value += value", "def append_row(self, values):\n self.range(self._op.max_row + 1, 1, len(values)).values = values", "def test_radd(self):\n tensor = Tensor([2, 4, 6, 8])\n result = 1 + tensor\n result_np = np.array(1) + tensor\n result_arr = [1, 1, 1, 1] + tensor\n\n assert result.data.tolist() == [3, 5, 7, 9]\n assert result_np.data.tolist() == [3, 5, 7, 9]\n assert result_arr.data.tolist() == [3, 5, 7, 9]", "def _cartesian_add(xs):\n return sum(prefer_static.reshape(x, shape=[-1] + [1]*(len(xs) - 1 - i))\n for i, x in enumerate(xs))", "def _ipu_multi_update_add(op, grads):\n return [\n None, None,\n gen_popops_ops.ipu_multi_slice(\n grads[2],\n indices=op.inputs[1],\n indices_are_sorted=op.get_attr(\"indices_are_sorted\"))\n ]", "def extend(self, *args):\n for arg in args:\n self.add(arg)", "def sum(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"sum\", skipna)\n return k, cast(pdarray, v)", "def values(self, values):\n self.data.values = values", "def __radd__(self, left_arr):\n concat_arr = left_arr.copy() # Create new instance to return\n concat_arr.extend(self)\n return concat_arr", "def __add__(self, rhs: Union[float, Simpy]) -> Simpy:\n result: list[float] = []\n if isinstance(rhs, float):\n for item in self.values:\n result.append(item + rhs)\n else:\n assert len(self.values) == len(rhs.values)\n for i in range(len(self.values)):\n result.append(self.values[i] + rhs.values[i])\n return Simpy(result)", "def vector_add(a, b):\n assert(len(a) == len(b))\n\n from operator import add\n return tuple(map(add, a, b))", "def add(self, items):\n if isinstance(items, list):\n self.items.extend(items)\n else:\n self.items.append(items)", "def addedrow(self, y, addlist):\n out = self[y]\n for x in xrange(0, len(out)):\n out[x] = out[x]+addlist[x]\n return out", "def append(x, ys):\n return list(ys) + [x]", "def append(self, val):\n self._values.push(val)", "def add(self, number):\n\n return self.from_list([x+number for x in self.vector])", "def addAll(self,*args, **kwargs):\n pass", "def __add__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] + other[i]\n\n return v", "def test_op_add_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def add(numbers):\n \n result = numbers[0]\n for n in numbers[1:]:\n result = n + result\n return result" ]
[ "0.76679605", "0.74897724", "0.7482708", "0.74698025", "0.6868423", "0.68615234", "0.67837113", "0.6779977", "0.67769617", "0.6654942", "0.66449934", "0.6613038", "0.6609851", "0.6605651", "0.65964", "0.6575036", "0.6555835", "0.6468301", "0.6383492", "0.6353225", "0.63494134", "0.63447946", "0.62914366", "0.6259933", "0.6258927", "0.62574804", "0.62368286", "0.6235561", "0.61240834", "0.61020684", "0.60953987", "0.60953134", "0.60780877", "0.6065177", "0.6062441", "0.60432196", "0.6033662", "0.6026588", "0.6019959", "0.60184455", "0.59967536", "0.59871745", "0.5979786", "0.597338", "0.5969815", "0.5947098", "0.59434813", "0.59337366", "0.5929673", "0.59271383", "0.59217334", "0.5897904", "0.5897904", "0.5887844", "0.58662933", "0.58577013", "0.58484864", "0.5838671", "0.5830654", "0.58250576", "0.58145535", "0.58036274", "0.5791012", "0.57906055", "0.5788675", "0.57799006", "0.57595074", "0.57575786", "0.57517475", "0.5741596", "0.5736607", "0.57324404", "0.57213396", "0.5715724", "0.5711497", "0.5710666", "0.5710666", "0.5709559", "0.56998056", "0.5694184", "0.56923944", "0.56891817", "0.56728464", "0.56722295", "0.56716216", "0.5655592", "0.5644781", "0.5644203", "0.5642319", "0.56422424", "0.5638713", "0.5637887", "0.5627164", "0.562651", "0.5624324", "0.5624265", "0.5609997", "0.56068605", "0.56040746", "0.56031406" ]
0.6270565
23
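A minimal, self-contained sketch of how the `add_array` entry above could be exercised. Only the method body appears in the record, so the `orient` helper, the container class, and the indep/dep record layout used here are assumptions.

import numpy as np

def orient(values, keys):
    # Hypothetical helper: make the first axis line up with the keys.
    values = np.asarray(values)
    return values if values.shape[0] == len(keys) else values.T

class DataList(list):
    # Hypothetical container: each record pairs an independent-variable dict
    # with a dependent-variable dict, mirroring the INDEP/DEP layout that the
    # other entries in this dump refer to.
    def add_dict(self, indep, dep):
        self.append({"indep": dict(indep), "dep": dict(dep)})

    def add_array(self, indep, keys, values):
        # Body taken from the record above.
        if np.ndim(values) > 1:
            values = orient(values, keys)
        dep = {k: v for k, v in zip(keys, values)}
        self.add_dict(indep, dep)

d = DataList()
d.add_array({"run": 1}, ["x", "y"], np.array([[1, 2, 3], [4, 5, 6]]))
print(d[0]["dep"]["y"])  # [4 5 6]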
Return a generator for entries that all include the keys
def filter(self, keys, lst=None, func="all"): f = all if func == "all" else any if lst is None: lst = self if DEP in lst[0] and INDEP in lst[0]: filt_dep = True else: filt_dep = False def filt_func(d): if filt_dep: return f([k in d[INDEP] or k in d[DEP] for k in listify(keys)]) else: return f([k in d for k in listify(keys)]) return filter(filt_func, lst)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k", "def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key", "def keys(self):\n for ts in self:\n yield ts", "def __iter__(self, *args, **kwargs):\n for key in self.keys(*args, **kwargs):\n yield key", "def items(self):\n for key in self._sorted:\n yield key, self._map[key]", "def __iter__(self):\n\n for each in list(self.keys()):\n yield each", "def get_entries(self) -> Generator[str, None, None]:\n return (entry for entry in self.entries)", "def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\n\n for bucket in self.buckets.itervalues():\n for key in bucket.iterkeys():\n yield key", "def exact_key_items(self):\n for key_node, value in self.get_tree_entries():\n for key in self._defining_context.infer_node(key_node):\n if is_string(key):\n yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)", "def __iter__(self):\n if self._len_keys == 1:\n yield from self._dict.keys()\n else:\n for key in self._dict.keys():\n yield tuple(sorted(list(key)))", "def iteritems(self):\n for key in self:\n yield key, self[key]", "def items(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=False):\n for idx_key, key in self.pairs(args, lo, hi, reverse, max,\n include, txn):\n obj = self.coll.get(key, txn=txn, rec=rec)\n if obj:\n yield key, obj\n else:\n warnings.warn('stale entry in %r, requires rebuild' % (self,))", "def _yield_keys(self, key):\n if self._len_keys > 1:\n keys = self._validate_and_split_key(key)\n for key in keys:\n yield tuple(sorted(list(key)))\n else:\n yield from self._validate_and_split_key(key)", "def keysAll():", "def _get_keys(self, ckey):\n if self.has_key(ckey):\n doc = self[ckey]\n else:\n doc = [o for o in self.get_values(ckey)]\n if isinstance(doc, dict):\n for key in doc.keys():\n if ckey.rfind('%s.' % key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n else:\n yield ckey\n elif isinstance(doc, list):\n for item in doc:\n if isinstance(item, dict):\n for key in item.keys():\n if ckey.rfind('%s.' 
% key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n elif isinstance(item, list):\n for elem in item:\n if isinstance(elem, dict):\n for kkk in elem.keys():\n yield '%s.%s' % (ckey, kkk)\n else:\n yield ckey\n else: # basic type, so we reach the end\n yield ckey\n else: # basic type, so we reach the end\n yield ckey", "def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()", "def __iter__(self):\n for key in sorted(self.keys):\n yield key, self[key]", "def iterkeys(self, multi=False):\n root = self.root\n curr = root[NEXT]\n if multi:\n while curr is not root:\n yield curr[KEY]\n curr = curr[NEXT]\n else:\n yielded = set()\n yielded_add = yielded.add\n while curr is not root:\n k = curr[KEY]\n if k not in yielded:\n yielded_add(k)\n yield k\n curr = curr[NEXT]", "def RecurseKeys(self):\n yield self\n for subkey in self.GetSubkeys():\n for key in subkey.RecurseKeys():\n yield key", "def __iter__(self):\n return self.ordered_keys.__iter__()", "def items(self):\n for metakey in self:\n yield metakey, self[metakey]", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def iteritems(self):\n for key in self:\n yield (key, self[key])", "def RecurseKeys(self):\n root_key = self.GetRootKey()\n if root_key:\n for registry_key in root_key.RecurseKeys():\n yield registry_key", "def __iter__(self):\n return iter(self.keys())", "def __iter__(self):\n for key in self._ctx:\n yield key", "def __iter__(self):\n\t\treturn self.keys()", "def __iter__( self ) :\n\n for entry in self.__entries : yield entry", "def __iter__(self):\n for domain in self.keys():\n yield domain", "def __iter__(self):\n for key, value in self.read():\n yield key, value", "def iterkeys(self):", "def iterkeys(self):", "def random_keys(self):\n while True:\n yield self.generator.str()", "def __iter__(self):\n return self.keys()", "def _map___iter__(self):\n return self.iterkeys()", "def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n list(self._groups.keys())):\n yield key", "def itervalues(self):\n for key in self:\n yield self[key]", "def iterkeys(self):\n return self.__iter__()", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())", "def all(self):\n self.scan()\n return self.entries", "def keys(self):\n pattern = r'^\\d+-aws-billing-csv-[\\d+]{4}-[\\d+]{2}.csv$'\n for key in self.bucket.get_all_keys():\n if re.search(pattern, key.name):\n yield key", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def __iter__(self):\n for acronym in self.keys:\n yield acronym, self.dict[acronym]", "def __iter__(self):\n for key in self._catalogs:\n yield key", "def keys(self):\n sql = u\"\"\"\n SELECT `key` FROM `{table}` WHERE 1\n \"\"\".format(table=self.name)\n\n for row in self.conn.execute(sql):\n yield row['key']", "def iterkeys(self):\n return iter(kvp.key for kvp in self.keyvaluepair_set.all())", "def keys(self, key=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=None):\n return itertools.imap(ITEMGETTER_0,\n self.items(key, lo, hi, reverse, max, include, txn, rec))", "def getItemIter(self):\n for key, val in 
self.db.getAllItemIter(db=self.sdb, split=False):\n keys = tuple(key.decode(\"utf-8\").split('.'))\n yield (keys, self.klas(qb64b=bytes(val)))", "def __iter__(self):\n for key in chain(\n self.HEAD_KEYS, (key for key, _ in self.HEAD_EXTRA), self.HEAD_FROM\n ):\n yield key", "def itermerged(self):\n for key in self:\n val = _dict_getitem(self, key)\n yield val[0], b', '.join(val[1:])", "def __iter__(self):\n\n return iter([key for key in self._data.keys()])", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n\n d = HBINCell(self._buf, key_offset, self)\n yield NKRecord(self._buf, d.data_offset(), self)\n key_index += 4", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n d = HBINCell(self._buf, key_offset, self)\n\n try:\n for k in d.child().keys():\n yield k\n except RegistryStructureDoesNotExist:\n raise ParseException(\"Unsupported subkey list encountered.\")\n\n key_index += 4", "def keys(self):\n key_index = 0x4\n\n for _ in range(0, self._keys_len()):\n key_offset = self.abs_offset_from_hbin_offset(self.unpack_dword(key_index))\n\n d = HBINCell(self._buf, key_offset, self)\n yield NKRecord(self._buf, d.data_offset(), self)\n key_index += 8", "def _expand_keys(entities):\n keys = list(entities.keys())\n values = list(product(*[entities[k] for k in keys]))\n return [{k: v for k, v in zip(keys, combs)} for combs in values]", "def __iter__(self):\n with SessionContext(self.SessionClass) as session:\n keys = session.query(PAW2_DBObject.key)\n keys = [c[0] for c in keys]\n random.shuffle(keys)\n return keys.__iter__()", "def __iter__(self):\n for x in sorted(self.keys()):\n yield self[x]", "def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v", "def iterkeys(self):\n return DictKeysIterator(self)", "def iteroriginal(self):\n for key in self:\n vals = _dict_getitem(self, key)\n for val in vals[1:]:\n yield vals[0], val", "def items(self, *args, **kwargs):\n return [ (key, self._get(key, *args, **kwargs),) for key in self.keys(*args, **kwargs) ]", "def keys(self):\n return iter(k for k, _ in self._pairs())", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):\r\n more_results = True\r\n k = None\r\n while more_results:\r\n rs = bucket.get_all_keys(prefix=prefix, marker=marker,\r\n delimiter=delimiter, headers=headers)\r\n for k in rs:\r\n yield k\r\n if k:\r\n marker = k.name\r\n more_results= rs.is_truncated", "def iter_keys(self, search, itersize=None, client=None, version=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=False)\r\n\r\n pattern = self.make_key(search, version=version)\r\n cursor = b\"0\"\r\n\r\n while True:\r\n cursor, data = client.scan(cursor, match=pattern, count=itersize)\r\n\r\n for item in data:\r\n item = smart_text(item)\r\n yield self.reverse_key(item)\r\n\r\n if cursor == b\"0\":\r\n break", "def itervalues(self, key=None):\n if key != None:\n vals = self.get(key)\n if vals != None:\n for val in vals:\n yield val\n else:\n for key in self.iterkeys():\n vals = self.get(key)\n for val in vals:\n yield val", "def 
mapper_get_items(self, key_dict, value):\n for k in key_dict:\n lineitems = key_dict[k]\n # lineitems=value.split(\",\")\n if int(self.options.iteration) == 1:\n self.increment_counter(\"association_rules\", 'transaction_count', 1)\n for item in lineitems:\n yield item, 1\n else:\n itemsets = combinations(lineitems, self.options.iteration)\n frequent_itemsets = filter(lambda x: set(x) not in self.frequent_items, itemsets)\n for itemset in frequent_itemsets:\n yield itemset, 1", "def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()", "async def keys(self) -> Iterable[str]:", "def iterkeyrefs(self):\n for key in self.iterkeys():\n yield ref(key)", "def keys_iterator(keyspace, table_name):\n \n keys = keys_for_table(keyspace, table_name)\n row_keys = keys['row_keys']\n clustering_keys = keys['clustering_keys']\n columns = keys['columns']\n\n\n try:\n key_str = \", \".join(str(r) for r in row_keys)\n key_where_str = \"\"\"AND \"\"\".join((str(r) + \"=%(\" + str(r) + \")s \") for r in row_keys)\n\n keys_query = \"\"\"SELECT DISTINCT %s FROM %s \"\"\" % (key_str, table_name)\n keys_itr = simple_execute(keys_query)\n for k in keys_itr:\n yield k\n except:\n default_log.error(\"Exception while attempting to find all partition keys %s \" % format_exc())\n default_log.info(\"SELECT DISTINCT failed. This is not necessarily a surprise, \"\n \"it's likely that either rpc_timeout is exceeded or you are \"\n \"running an older version of cassandra where SELECT DISTINCT \"\n \"is not supported, falling back to token iteration \")\n\n\n try:\n starting_token_key_str = \", \".join(str(r) for r in row_keys)\n starting_token_where_str = \"\"\"AND \"\"\".join((str(r) + \"=%(\" + str(r) + \")s \") for r in row_keys)\n query = \"\"\"SELECT token(%s) as tablecleaner_token, %s FROM %s LIMIT 1\"\"\" % ( starting_token_key_str, \n starting_token_key_str, \n table_name )\n starting_token_row = simple_execute(query)\n if len(starting_token_row):\n default_log.debug(starting_token_row[0])\n else:\n return\n\n starting_token = starting_token_row[0].tablecleaner_token\n\n more_tokens = True\n while more_tokens:\n default_log.debug(\"Iterating through tokens, more tokens %s , current token %s \" % (more_tokens, starting_token))\n query = \"\"\"SELECT token(%s) as tablecleaner_token, %s FROM %s \n WHERE token(%s) > %s LIMIT 20 \"\"\" % ( starting_token_key_str, \n starting_token_key_str, \n table_name, \n starting_token_key_str,\n starting_token)\n params = {'starting_token': starting_token}\n rows = simple_execute(query, params)\n if len(rows) < 1:\n more_tokens = False\n else:\n for r in rows:\n yield r\n\n starting_token = r.tablecleaner_token\n\n except:\n default_log.error(\"Error: %s \" % format_exc())\n return", "def permutations(self, key):\n yield key", "def getItemIter(self):\n for key, raw in self.db.getAllItemIter(db=self.sdb, split=False):\n keys = tuple(key.decode(\"utf-8\").split('.'))\n yield (keys, coring.Serder(raw=bytes(raw)))", "def __iter__(self):\n if self.empty():\n return\n for node in self.root:\n yield node.key", "def __iter__(self):\n # This could be as simple as \"return self._getKeyList().__iter__()\"\n # but this performs some extra consistency checking to make sure the\n # key we iterate to actually exists, to keep us from crashing if\n # our db is a little out of sync with itself.\n\n # This is a nasty hack because our db seems prone to circular links\n nItems = 0\n for item in self._getKeyList():\n if item in 
self:\n yield item\n nItems += 1\n # NASTY HACK!\n if nItems > 1000:\n self.reindex()\n raise Exception(\"Circular link corrected, try again\")\n else:\n self._delKey(item)", "def AllKeys(self) -> _n_0_t_1[str]:", "def __iter__(self):\n return iter(self._key_order)", "def source_keys(self):\n for source_key in self._counts.keys():\n yield source_key", "def keys(self, _prec=\"\"):\n if self.isLeaf:\n yield _prec + self.ch\n\n for chld in self.children.values():\n yield from chld.keys(_prec + self.ch)", "def get_values(self, ckey):\n for next_key, item in yield_obj(self, ckey):\n if isdictinstance(item):\n for final, elem in yield_obj(item, next_key):\n if isdictinstance(elem) and elem.has_key(final):\n yield elem[final]\n else:\n yield elem\n elif isinstance(item, list) or isinstance(item, GeneratorType):\n for final, elem in item:\n for last, att in yield_obj(elem, final):\n if isdictinstance(att) and att.has_key(last):\n yield att[last]\n else:\n yield att", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def __iter__(self) -> Iterator[str]:\n return iter(self._keys)", "def __iter__(self):\n seen = set()\n for elem, group in self._mapping.items():\n if elem not in seen:\n yield group\n seen.update(group)", "def iterkeys(self):\n return iter(self._sequence)", "def keys(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None):\n return itertools.imap(ITEMGETTER_1,\n self.pairs(args, lo, hi, reverse, max, include, txn))", "def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result", "def collect(self, *keys):\n items = []\n for key in keys:\n if key.endswith(b\"*\"):\n key, explode = key[:-1], True\n else:\n explode = False\n if b\":\" in key:\n key, max_length = key.partition(b\":\")[0::2]\n max_length = int(max_length)\n else:\n max_length = None\n value = self.values.get(key)\n if isinstance(value, dict):\n if not value:\n items.append((key, None))\n elif explode:\n items.extend((key, _) for _ in value.items())\n else:\n items.append((key, value))\n elif isinstance(value, (tuple, list)):\n if explode:\n items.extend((key, _) for _ in value)\n else:\n items.append((key, list(value)))\n elif max_length is not None:\n items.append((key, value[:max_length]))\n else:\n items.append((key, value))\n return [(key, value) for key, value in items if value is not None]", "def items(self):\n # Collect all pairs of key-value entries in each bucket\n all_items = []\n for bucket in self.buckets:\n all_items.extend(bucket.items())\n return all_items", "def __iter__(self):\n\n return self._entries.__iter__()", "def entrySet(self):\n set = HashSet()\n for key in keySet():\n set.add(Entry(key, self.get(key)))\n return set", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def rds_scan_keys(rds, glob):\n n = 0\n keys = []\n while(True):\n n, k = rds.scan(n, match=glob)\n keys.extend(k)\n if n == 0:\n break\n return keys", "def __iter__(self):\n yield from chain.from_iterable(self.data.values())", "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n results.append(string)\n return results", "def 
get_all(self, key, *path, **data):\n\t\twhile True:\n\t\t\tresponse = self.get(*path, **data)\n\t\t\titems = response[key]\n\t\t\tif not items:\n\t\t\t\treturn\n\t\t\tfor item in items:\n\t\t\t\tyield item\n\t\t\tif '_links' not in response or 'next' not in response['_links']:\n\t\t\t\treturn\n\t\t\tpath = [response['_links']['next']]\n\t\t\tdata = {}" ]
[ "0.7377886", "0.69465625", "0.68848234", "0.6851275", "0.6776351", "0.6718517", "0.669439", "0.6664863", "0.6648368", "0.6597698", "0.6597698", "0.65608925", "0.65233594", "0.64917487", "0.6451746", "0.6446408", "0.6437362", "0.6426836", "0.6422858", "0.6407071", "0.63981986", "0.63566864", "0.6346078", "0.63292897", "0.6308591", "0.6270178", "0.62642884", "0.62613845", "0.6256731", "0.6256651", "0.62413347", "0.6224999", "0.62218904", "0.6213953", "0.61952823", "0.61952823", "0.61880624", "0.6171452", "0.6157733", "0.61554474", "0.61347497", "0.6132551", "0.6131177", "0.61129487", "0.6103885", "0.6103205", "0.60962355", "0.609297", "0.60891575", "0.6086832", "0.60684085", "0.60681677", "0.6067882", "0.6065302", "0.6057623", "0.60560405", "0.6042351", "0.6042045", "0.6032895", "0.60302085", "0.60262495", "0.6025318", "0.60186905", "0.6018021", "0.601467", "0.6012313", "0.60074973", "0.59952253", "0.59952253", "0.59902596", "0.5985316", "0.59820735", "0.5976994", "0.5975455", "0.5973299", "0.59688646", "0.5958248", "0.59557736", "0.5951642", "0.594511", "0.59428567", "0.59427214", "0.59397334", "0.593541", "0.5930977", "0.59215355", "0.5907569", "0.5901969", "0.590107", "0.5899543", "0.58895636", "0.58607453", "0.5858569", "0.5856545", "0.5853064", "0.58391833", "0.58314186", "0.58078384", "0.5798821", "0.5798679", "0.57937944" ]
0.0
-1
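A minimal sketch of the key-filtering idea in the `filter` entry above, assuming flat dict records; the INDEP/DEP nesting and the `listify` helper from the original class are left out.

rows = [{"x": 1, "y": 2}, {"x": 3}, {"y": 4, "z": 5}]

def filter_rows(keys, lst, func="all"):
    # Keep rows that contain all (or any) of the requested keys.
    f = all if func == "all" else any
    return filter(lambda d: f(k in d for k in keys), lst)

print(list(filter_rows(["x", "y"], rows)))              # [{'x': 1, 'y': 2}]
print(list(filter_rows(["x", "y"], rows, func="any")))  # every row in the sample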
Return a list of entries that all include the keys
def filtered(self, keys, lst=None, func="all"): lst = self if lst is None else lst if len(lst) == 0: raise ValueError("No rows in list") return [row for row in self.filter(keys, lst, func=func)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def filter_by_keys(self, keys):\n return list(filter(lambda item: item.keyword in set(keys), self._metadata))", "def keys(self):\r\n return [k for k in self]", "def get_keys(self, ckey=None):\n if ckey:\n keys = self._get_keys(ckey)\n else:\n keys = self.keys()\n for key in self.keys():\n keys += [k for k in self._get_keys(key)]\n return list(set(keys))", "def keys(self):\n return [ x for x in self ]", "def list_all_keys(self):\n \n return self.keys", "def keys(self, *args, **kwargs):\n return self._list(*args, **kwargs)", "def exclusively(self, keys, lst=None):\n minimal = self.minimal() if lst is None else lst\n\n def make_exclusive(d, keys):\n dct = {}\n for k in keys:\n if k in d:\n dct[k] = d[k]\n else:\n dct[k] = -999\n return dct\n\n lst = []\n for d in minimal:\n dct = make_exclusive(d, keys)\n if len(dct) > 0:\n lst.append(dct)\n return lst", "def entrySet(self):\n set = HashSet()\n for key in keySet():\n set.add(Entry(key, self.get(key)))\n return set", "def items(self):\n # Collect all pairs of key-value entries in each bucket\n all_items = []\n for bucket in self.buckets:\n all_items.extend(bucket.items())\n return all_items", "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n results.append(string)\n return results", "def keysAll():", "def AllKeys(self) -> _n_0_t_1[str]:", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def keys(self):\n # TODO: Collect all keys in each of the buckets\n all_keys = [] # Will store all the key\n for bucket in self.buckets:\n for key in bucket:\n if key is not None:\n all_keys.append(key[0])\n return all_keys", "def get_all_keys(self, headers=None, **params):\r\n return self._get_all([('Contents', self.key_class),\r\n ('CommonPrefixes', Prefix)],\r\n '', headers, **params)", "def all(self):\n self.scan()\n return self.entries", "def get_list(self):\n return sorted(self.__entries.keys())", "def keys(self):\n # Collect all keys in each bucket\n all_keys = []\n for bucket in self.buckets:\n for key, value in bucket.items():\n all_keys.append(key)\n return all_keys", "def union(self, key: str, skip_duplicates=False) -> list:\n result = []\n for items in self.get(key):\n for item in items:\n if skip_duplicates and item in result:\n continue\n result.append(item)\n return result", "def items(self):\n acc = []\n for k in self.keys():\n pm = self._maps[k]\n acc.append((k,pm))\n return acc", "def entries(self):\n return [self._entries[key] for key in self._order]", "def get_all_keys(self):\r\n all_keys = []\r\n for i in range(len(self.hash_table)):\r\n if self.hash_table[i] is not None:\r\n all_keys.append(self.hash_table[i].key)\r\n return all_keys", "def keySet (map):\n ltset = lt.newList()\n for pos in range(lt.size(map['table'])):\n entry = lt.getElement (map['table'], pos+1)\n if (entry['key']!=None and entry['key']!='__EMPTY__'):\n lt.addLast (ltset, entry['key'])\n return ltset", "def items(self):\n x = []\n for k in list(self.keys()):\n x.append((k, self[k]))\n return x", "def get_all_keys(self, headers=None, **params):\r\n key = Key(self.name, self.contained_key)\r\n return SimpleResultSet([key])", "def keys(self) -> List:\n pass", "def key_list(dict):\n 
list = []\n for key in dict:\n list.append(key)\n return list", "def get_flush_lists(self, keys):\r\n return set(e for flush_list in\r\n filter(None, cache.get_many(keys).values())\r\n for e in flush_list)", "def getall(self, key):\n return self.values.get(key, [])", "def _get_all_credential_keys(self):\n return [dict(key) for key in self._data.keys()]", "def items(self, *args, **kwargs):\n return [ (key, self._get(key, *args, **kwargs),) for key in self.keys(*args, **kwargs) ]", "def keys(self):\n return [kvp.key for kvp in self.keyvaluepair_set.all()]", "def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result", "def keys(self):\n return list(self.__iter__())", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def items(self):\n return [(k, self[k]) for k in self.keys()]", "def getkeys(self):\n return list(self.keys)", "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def all_keys(self):\n return self.derivable_keys() + self.loadable_keys()", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def keys(self):\n return list(self.iterkeys())", "def keys(self):\n return [key for key, value in self.items()]", "def items(self):\n return list(zip(self.keys(), self.values()))", "def items(self):\n return list(zip(self.keys(), self.values()))", "async def keys(self) -> Iterable[str]:", "def keys(self, installer_context):\n keys = set()\n for source in self.sources:\n keys.update(set(source.keys(installer_context)))\n return list(keys)", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def keys(self):\n for ts in self:\n yield ts", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def _extra_keys(self):\r\n return []", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def grab_keys(self,\r\n entrylist,\r\n all_caps=True,\r\n first_caps=True):\r\n\r\n returnkeys = set()\r\n for a_temp in entrylist:\r\n returnkeys = returnkeys.union(self.get_keys_from_note(a_temp))\r\n returnlist = [k_temp for k_temp in returnkeys\r\n if (all_caps\r\n or k_temp != k_temp.upper())\r\n and (first_caps\r\n or k_temp[0]+k_temp[1:]\r\n != k_temp[0].upper()+k_temp[1:])]\r\n return returnlist", "def keys(self, **kwargs) -> Iterable:\n return self.store.keys(**kwargs)", "def hgetall(self, key):\n return self._command(b'HGETALL', key, handler=list_to_dict)", "def get_all_keys(self):\n return self.psettings.allKeys()", "def entries(self):\n if self.preload_metadata and not self._entries:\n self._entries = dict((self._decode_name(entry.key), entry)\n for entry in self.bucket.list())\n return self._entries", "def _get_keys(self, listOfKeys):\n return self._keys", "def keys(self) -> tuple[Hashable, ...]:\n return tuple([self._hashify(item = c) for c in self.contents])", "def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)", "def keys(self):\n return [entry.key for entry in self.table if entry.value is not None]", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def filter_record_keys(record_list, 
whitelist_keys):\n\n filtered = [{k: v for k, v in [y for y in list(x.items()) if y[0] in whitelist_keys]} for x in record_list]\n return filtered", "def intersecting_entries(self, other):\n self_keys = set(self._entries.keys())\n other_keys = set(other._entries.keys())\n common_keys = self_keys.intersection(other_keys)\n return ([(self._entries[key], other._entries[key])\n for key in common_keys])", "def get_entries_for_topic(cls, topic, entry_id_list):\n\t\tresults = cls.get([cls.create_key(topic, entry_id)\n\t\t\t\t\t\t\t\t\t\t\t for entry_id in entry_id_list])\n\t\t# Filter out those pesky Nones.\n\t\treturn [r for r in results if r]", "def get_nodes(self):\n return_set = set()\n for key in self._main_dictionary:\n return_set.add(key)\n return return_set", "def keys(self):\n\n return list(self.iterkeys())", "def fetch_all_keys():\n response = TIME_TABLE.scan()\n items = response['Items']\n items.sort(key=lambda x: x['timeStamp'])\n response = ''\n for item in items:\n response = '{0}\\n{1}'.format(response, item)\n return response", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def items(self):\n with self.__plock:\n return map(lambda key: (key, self[key]), self._keys)", "def entries():\n\n\treturn [entry.value for entry in db.session.query(Entry).all()]", "def all(self, return_tuples=False):\n if return_tuples:\n return [(key, value) for (key, value) in self._store.items() if not key.startswith(\"_\")]\n return [key for key in self._store if not key.startswith(\"_\")]", "def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys", "def get_keys(request):\n\n keys=[]\n reports = Report.objects.all().exclude(institute = 'PUBMED')\n for report in reports:\n json_rep = report.report_json\n for el in json_rep.keys():\n if el not in keys:\n keys.append(el)\n json_resp = {'keys':keys}\n return JsonResponse(json_resp)", "def get_all_keyrings(self) -> List[ImaKeyring]:\n return list(self.keyrings.values())", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def keys(self, pattern=\"*\"):\n lenOfPrefix = len(self.appendKeys(\"\"))\n return [key[lenOfPrefix:] for key in\n self.redis.keys(self.appendKeys(pattern))]", "def _expand_keys(entities):\n keys = list(entities.keys())\n values = list(product(*[entities[k] for k in keys]))\n return [{k: v for k, v in zip(keys, combs)} for combs in values]", "def readentries(self):\n return list(x for x in self)", "def keys(self) -> List[str]:\n raise NotImplementedError", "def keypairs(self):\n return list(self._list(_keypair.Keypair, paginated=False))", "def keys(self):\n return self.keys", "def keys(self):\n ks = dict.keys(self)\n ks.sort()\n return ks", "def get_adjacent_keys(self, key: str) -> List[str]:\n return [k for k in self.get_adjacent(key)]", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def build_list(self, word_list):\n # Get frequency list for keys\n freq = word_list.groupby('key').agg('count')\n # Filter out only keys with greater or equal frequency to length\n key_list = 
freq.loc[freq['word'] >= freq.index.str.len()]\n return key_list", "def all_keys(blueprint: Union[dict, list]) -> list:\n\n keys = list()\n\n if isinstance(blueprint, list):\n for item in blueprint:\n keys.extend(all_keys(item))\n elif isinstance(blueprint, dict):\n for key, value in blueprint.items():\n keys.append(key)\n keys.extend(all_keys(value))\n\n return keys", "def get_dictionary_items(self,dictionary):\r\n ret = []\r\n for key in self.get_dictionary_keys(dictionary):\r\n ret.extend((key,dictionary[key]))\r\n return ret", "def keySet (self) -> StringSet:\n\n Logging.trace(\">>\")\n result = set(self._keyToValueMap.keys())\n Logging.trace(\"<<: %r\", result)\n return result", "def keys(self):\n return", "def keys():", "def keys(self):\n\n return self.keys_set", "def keys(self):\n klst = list(self._maps.keys())\n klst.sort()\n return klst", "def combined(self):\n d = self._combined\n return [c for key, c in d.items()]", "def items(self):\r\n return [(k, self[k]) for k in self]", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()" ]
[ "0.69771874", "0.6651784", "0.6631111", "0.6531024", "0.65281665", "0.65107197", "0.64283806", "0.64203656", "0.64011246", "0.63802695", "0.6379789", "0.63782895", "0.63735026", "0.63570124", "0.6356654", "0.63414973", "0.6328313", "0.62799704", "0.6264695", "0.6261566", "0.6260251", "0.6215976", "0.6215335", "0.62142736", "0.6214205", "0.62086654", "0.6176452", "0.6170279", "0.6168288", "0.614796", "0.6146262", "0.61305577", "0.60958254", "0.6091684", "0.608029", "0.6074288", "0.6071083", "0.60374975", "0.6025534", "0.6009177", "0.60039186", "0.5996191", "0.5992917", "0.5986023", "0.5986023", "0.5980325", "0.5977668", "0.5972898", "0.5972898", "0.59666836", "0.5960336", "0.59579533", "0.5954527", "0.5954527", "0.5953663", "0.59120256", "0.59067005", "0.59061253", "0.5894614", "0.5887151", "0.58693427", "0.5866021", "0.5834306", "0.5833994", "0.58321404", "0.5825214", "0.58182985", "0.5818252", "0.5811559", "0.5808227", "0.58050877", "0.58050877", "0.58028954", "0.58004236", "0.5795769", "0.57902336", "0.5788425", "0.5786945", "0.57822114", "0.57822114", "0.5777454", "0.5771227", "0.5771041", "0.57680875", "0.5757705", "0.5757658", "0.5757075", "0.57445425", "0.57423085", "0.5741626", "0.574097", "0.5725765", "0.5725542", "0.57255405", "0.5722864", "0.57190204", "0.5718261", "0.5711103", "0.5707263", "0.5704486" ]
0.6046809
37
Return a generator for entries that all contain keyvalue pairs
def iwhere(self, dct=None, lst=None, **kwargs):
    dct = {} if dct is None else dct
    m = dct.copy()
    m.update(kwargs)
    if lst is None:
        lst = self
    if DEP in lst[0] and INDEP in lst[0]:
        filt_dep = True
    else:
        filt_dep = False

    def filt_func(d):
        if filt_dep:
            return all(
                [v == d[INDEP].get(k, d[DEP].get(k, None)) for k, v in m.items()]
            )
        else:
            return all([v == d.get(k, None) for k, v in m.items()])

    return filter(filt_func, lst)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())", "def exact_key_items(self):\n for key_node, value in self.get_tree_entries():\n for key in self._defining_context.infer_node(key_node):\n if is_string(key):\n yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)", "def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)", "def __iter__(self):\n for kv_pair in self._backing:\n if kv_pair and not kv_pair.value is Hashmap.absent:\n yield kv_pair", "def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key", "def __iter__(self):\n for key, value in self.read():\n yield key, value", "def itervalues(self):\n for key in self:\n yield self[key]", "def __iter__(self) -> Generator:\n for k in self.raw.keys():\n yield k", "def get_values(self, ckey):\n for next_key, item in yield_obj(self, ckey):\n if isdictinstance(item):\n for final, elem in yield_obj(item, next_key):\n if isdictinstance(elem) and elem.has_key(final):\n yield elem[final]\n else:\n yield elem\n elif isinstance(item, list) or isinstance(item, GeneratorType):\n for final, elem in item:\n for last, att in yield_obj(elem, final):\n if isdictinstance(att) and att.has_key(last):\n yield att[last]\n else:\n yield att", "def itervalues(self, key=None):\n if key != None:\n vals = self.get(key)\n if vals != None:\n for val in vals:\n yield val\n else:\n for key in self.iterkeys():\n vals = self.get(key)\n for val in vals:\n yield val", "def get_entries(self) -> Generator[str, None, None]:\n return (entry for entry in self.entries)", "def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()", "def iteritems(self):\n for key in self:\n yield key, self[key]", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def __iter__(self):\n\n for each in list(self.keys()):\n yield each", "def iterkeys(self):\n return iter(kvp.key for kvp in self.keyvaluepair_set.all())", "def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v", "def itervalues(self, multi=False):\n for k, v in self.iteritems(multi=multi):\n yield v", "def __iter__(self, *args, **kwargs):\n for key in self.keys(*args, **kwargs):\n yield key", "def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()", "def items(self):\n return [(entry.key, entry.value) for entry in self.table\n if entry.value is not None]", "def get_hashable_entries(nested:dict):\n for key, value in nested.items():\n if isinstance(value, abc.Mapping):\n yield from get_hashable_entries(value)\n else:\n yield (key, value)", "def mapper_get_items(self, key_dict, value):\n for k in key_dict:\n lineitems = key_dict[k]\n # lineitems=value.split(\",\")\n if int(self.options.iteration) == 1:\n self.increment_counter(\"association_rules\", 
'transaction_count', 1)\n for item in lineitems:\n yield item, 1\n else:\n itemsets = combinations(lineitems, self.options.iteration)\n frequent_itemsets = filter(lambda x: set(x) not in self.frequent_items, itemsets)\n for itemset in frequent_itemsets:\n yield itemset, 1", "def items(self):\n for key in self._sorted:\n yield key, self._map[key]", "def iteritems(self):\n for key in self:\n yield (key, self[key])", "def values(self):\n for key in self.metadb.values():\n yield key, self.datadb[key]", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def items(self):\n for metakey in self:\n yield metakey, self[metakey]", "def items(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=False):\n for idx_key, key in self.pairs(args, lo, hi, reverse, max,\n include, txn):\n obj = self.coll.get(key, txn=txn, rec=rec)\n if obj:\n yield key, obj\n else:\n warnings.warn('stale entry in %r, requires rebuild' % (self,))", "def __iter__(self):\n return iter(self.keys())", "def __iter__( self ) :\n\n for entry in self.__entries : yield entry", "def keys(self):\n for ts in self:\n yield ts", "def collect(self, *keys):\n items = []\n for key in keys:\n if key.endswith(b\"*\"):\n key, explode = key[:-1], True\n else:\n explode = False\n if b\":\" in key:\n key, max_length = key.partition(b\":\")[0::2]\n max_length = int(max_length)\n else:\n max_length = None\n value = self.values.get(key)\n if isinstance(value, dict):\n if not value:\n items.append((key, None))\n elif explode:\n items.extend((key, _) for _ in value.items())\n else:\n items.append((key, value))\n elif isinstance(value, (tuple, list)):\n if explode:\n items.extend((key, _) for _ in value)\n else:\n items.append((key, list(value)))\n elif max_length is not None:\n items.append((key, value[:max_length]))\n else:\n items.append((key, value))\n return [(key, value) for key, value in items if value is not None]", "def __iter__(self):\n if self._len_keys == 1:\n yield from self._dict.keys()\n else:\n for key in self._dict.keys():\n yield tuple(sorted(list(key)))", "def gen_key_value_extract(key, var, value, req_keys):\n if hasattr(var,'items'):\n for k, v in var.items():\n if k == key:\n if value.lower() in v.lower():\n v_list = []\n for req_key in req_keys:\n v_list.append(var[req_key])\n v_tup = tuple(v_list)\n yield v_tup\n if isinstance(v, dict):\n for result in gen_key_value_extract(key, v, value, req_keys):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in gen_key_value_extract(key, d, value, req_keys):\n yield result", "def __iter__(self) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in self._table.items())", "def __iter__(self):\n for value in self.__dict__.values():\n yield value", "def itervalues(self, *args, **kwargs):\n for key in self.iterkeys():\n yield self._get(key, *args, **kwargs)", "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def __next__(self):\n for (k, v) in pairs(self._data):\n yield (v, k)", "def __iter__(self):\n for key in self._ctx:\n yield key", "def items(self):\n return ((key, value) for (key, value) in zip(self.__keys, self.__vals))", "def __iter__(self):\n\n return iter([key for key in self._data.keys()])", "def iter_ents(self, **cond: 
str) -> Iterator['Entity']:\n items = cond.items()\n for ent in self.entities[:]:\n for key, value in items:\n if key not in ent or ent[key] != value:\n break\n else:\n yield ent", "def __iter__(self):\n seen = set()\n for elem, group in self._mapping.items():\n if elem not in seen:\n yield group\n seen.update(group)", "def __iter__(self):\n\t\treturn self.keys()", "def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n list(self._groups.keys())):\n yield key", "def __iter__(self):\n for acronym in self.keys:\n yield acronym, self.dict[acronym]", "def _get_keys(self, ckey):\n if self.has_key(ckey):\n doc = self[ckey]\n else:\n doc = [o for o in self.get_values(ckey)]\n if isinstance(doc, dict):\n for key in doc.keys():\n if ckey.rfind('%s.' % key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n else:\n yield ckey\n elif isinstance(doc, list):\n for item in doc:\n if isinstance(item, dict):\n for key in item.keys():\n if ckey.rfind('%s.' % key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n elif isinstance(item, list):\n for elem in item:\n if isinstance(elem, dict):\n for kkk in elem.keys():\n yield '%s.%s' % (ckey, kkk)\n else:\n yield ckey\n else: # basic type, so we reach the end\n yield ckey\n else: # basic type, so we reach the end\n yield ckey", "def dicts(self, value=None):\n if value is None:\n return [dict(zip(self.keys, line)) for line in self.data]\n return [dict(zip(self.keys, line)) for line in self.data if value in line]", "def __iter__(self):\n return self.keys()", "def iterpasswords(self):\n for key in self:\n yield self[key]", "def __iter__(self):\n for domain in self.keys():\n yield domain", "def __iter__(self):\n for v in self._items:\n yield v", "def __iter__(self):\n for key in sorted(self.keys):\n yield key, self[key]", "def iterkeys(self):\n\n for bucket in self.buckets.itervalues():\n for key in bucket.iterkeys():\n yield key", "def __iter__(self):\n for value in dict.__iter__(self):\n for count in range(self[value]):\n yield value", "def iteritems(self):\n\t\tself.filep.seek(self.start + 2048)\n\n\t\t# iterate until we hit the enddata marker\n\t\twhile self.filep.tell() < self.enddata - 1:\n\t\t\t# fetch the lengths of the key and value\n\t\t\t(klen, vlen) = unpack('<LL', self.filep.read(8))\n\n\t\t\t# yield the key and value as a tuple\n\t\t\tyield (self.filep.read(klen), self.filep.read(vlen))", "def getItemIter(self):\n for key, val in self.db.getAllItemIter(db=self.sdb, split=False):\n keys = tuple(key.decode(\"utf-8\").split('.'))\n yield (keys, self.klas(qb64b=bytes(val)))", "def iteroriginal(self):\n for key in self:\n vals = _dict_getitem(self, key)\n for val in vals[1:]:\n yield vals[0], val", "def _yield_keys(self, key):\n if self._len_keys > 1:\n keys = self._validate_and_split_key(key)\n for key in keys:\n yield tuple(sorted(list(key)))\n else:\n yield from self._validate_and_split_key(key)", "def items(self):\n for k, v in self._pairs():\n yield k, util.annotate(v)", "def values(self):\n return [kvp.value for kvp in self.keyvaluepair_set.all()]", "def _map___iter__(self):\n return self.iterkeys()", "def iteritems(self):\n for key, value in self._catalogs.iteritems():\n yield key, value", "def __iter__(self):\n for mapping in self._mappings.values():\n yield mapping", "def __iter__(self):\n yield from 
chain.from_iterable(self.data.values())", "def __iter__(self):\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key[0] != '_':\n\t\t\t\tyield value", "def __iter__(self):\n\n return self._entries.__iter__()", "def keys_geq_threshold (Dict, threshold):\n for key, value in Dict.items ():\n if value >= threshold:\n yield key", "def iterresults(self, essid):\n for key in self.iterkeys(essid):\n yield self[essid, key]", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def GetEntries(self, key, **unused_kwargs):\n value_index = 0\n for value in key.GetValues():\n # Ignore the default value.\n if not value.name:\n continue\n\n # Ignore any value that is empty or that does not contain an integer.\n if not value.data or not value.DataIsInteger():\n continue\n\n # TODO: change this 32-bit integer into something meaningful, for now\n # the value name is the most interesting part.\n text_dict = {}\n text_dict[value.name] = '0x{0:08x}'.format(value.data)\n\n if value_index == 0:\n timestamp = key.last_written_timestamp\n else:\n timestamp = 0\n\n yield event.WinRegistryEvent(\n key.path, text_dict, timestamp=timestamp,\n source_append=': {0:s}'.format(self.DESCRIPTION))\n\n value_index += 1", "def __iter__(self):\n for key in self._catalogs:\n yield key", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def query(self, q):\n for key in self.metadb.query(q):\n yield key, self.datadb[key]", "def iter_values(self):\n values = self.values\n if (values is not None):\n yield from values", "def getItemIter(self):\n for key, val in self.db.getAllItemIter(db=self.sdb, split=False):\n keys = tuple(key.decode(\"utf-8\").split('.'))\n yield (keys, bytes(val))", "def keys(self):\n return iter(k for k, _ in self._pairs())", "def __iter__(self):\r\n for attr, value in self.__dict__.items():\r\n a = getattr(self, attr)\r\n if type(a) is list:\r\n if len(a) > 0:\r\n yield attr, a", "async def next(self):\n if not self.has_next():\n raise IteratorExhausted()\n value = await self._source.next()\n if self._values is None:\n return value\n return {k: v for k, v in value.items() if k in self._values}", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def _get_entries(path): \n with open(path, \"r\", encoding=\"utf-8\") as file:\n entry = \"\"\n for line in file: \n if not line == \"\\n\":\n entry += line\n else:\n yield entry.strip()\n entry = \"\"", "def inner_generator():\n # A buffer where observed query-document features will be stored.\n # It is a list of dictionaries, one per query-document pair, where\n # each dictionary is a mapping from a feature ID to a feature value.\n for p in processed:\n yield p", "def all(self):\n self.scan()\n return self.entries", "def __iter__(self):\n for key in self._group._opts.keys():\n yield key", "def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):\r\n more_results = True\r\n k = None\r\n while more_results:\r\n rs = bucket.get_all_keys(prefix=prefix, marker=marker,\r\n delimiter=delimiter, headers=headers)\r\n for k in rs:\r\n yield k\r\n if k:\r\n marker = k.name\r\n more_results= rs.is_truncated", "def iterate(self, keys, couple_ids):\n # TODO: Better second iteration over only valid keys?\n for key, block in self.items():\n if [key[x][y] for x, y in couple_ids] == keys:\n yield key, block", "def py__iter__(self, contextualized_node=None):\n # Get keys.\n types = NO_VALUES\n for k, _ 
in self.get_tree_entries():\n types |= self._defining_context.infer_node(k)\n # We don't know which dict index comes first, therefore always\n # yield all the types.\n for _ in types:\n yield LazyKnownValues(types)", "def entries():\n\n\treturn [entry.value for entry in db.session.query(Entry).all()]", "def iterkeys(self, multi=False):\n root = self.root\n curr = root[NEXT]\n if multi:\n while curr is not root:\n yield curr[KEY]\n curr = curr[NEXT]\n else:\n yielded = set()\n yielded_add = yielded.add\n while curr is not root:\n k = curr[KEY]\n if k not in yielded:\n yielded_add(k)\n yield k\n curr = curr[NEXT]", "def itervalues(self):\n return DictValuesIterator(self)", "def iteritems(self, multi=False):\n root = self.root\n curr = root[NEXT]\n if multi:\n while curr is not root:\n yield curr[KEY], curr[VALUE]\n curr = curr[NEXT]\n else:\n for key in self.iterkeys():\n yield key, self[key]", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def iter_ents_tags(\n self,\n vals: Mapping[str, str] = EmptyMapping,\n tags: Mapping[str, str] = EmptyMapping,\n ) -> Iterator['Entity']:\n for ent in self.entities[:]:\n for key, value in vals.items():\n if key not in ent or ent[key] != value:\n break\n else: # passed through without breaks\n for key, value in tags.items():\n if key not in ent or value not in ent[key]:\n break\n else:\n yield ent" ]
[ "0.698165", "0.693192", "0.66643286", "0.665048", "0.6484602", "0.6445664", "0.641337", "0.6401473", "0.6376863", "0.63687515", "0.63678294", "0.63492215", "0.6311998", "0.6230821", "0.6206421", "0.61832714", "0.6166452", "0.6166452", "0.61628634", "0.6138833", "0.61117566", "0.60825735", "0.6049317", "0.6048052", "0.603858", "0.6024721", "0.60157996", "0.5998585", "0.5992761", "0.599138", "0.5966413", "0.59431237", "0.59431237", "0.59048605", "0.590147", "0.58792233", "0.58717996", "0.58595115", "0.5850993", "0.5845851", "0.58399844", "0.5834944", "0.5817545", "0.58099353", "0.5808079", "0.57974744", "0.5792313", "0.5783599", "0.5775987", "0.57707345", "0.57545084", "0.57490945", "0.57370085", "0.57155275", "0.5708719", "0.56904984", "0.5662349", "0.56571597", "0.5653561", "0.5652216", "0.5651903", "0.5648638", "0.5648313", "0.56476927", "0.564414", "0.5640759", "0.5639571", "0.56334376", "0.56292754", "0.562564", "0.5615698", "0.56113243", "0.5609624", "0.56076074", "0.5606873", "0.5605398", "0.5604649", "0.56026286", "0.56013954", "0.5595797", "0.55687785", "0.55612445", "0.55605763", "0.55571103", "0.5553631", "0.5551988", "0.5543926", "0.5539526", "0.55353284", "0.55281836", "0.5525142", "0.55181164", "0.55122554", "0.54991114", "0.54944056", "0.5488893", "0.5488516", "0.548562", "0.5474586", "0.5471474", "0.5456451" ]
0.0
-1
Return a list of entries that all contain keyvalue pairs
def where(self, dct=None, lst=None, **kwargs):
    return [row for row in self.iwhere(dct, lst, **kwargs)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def values(self):\n return [kvp.value for kvp in self.keyvaluepair_set.all()]", "def items(self):\n return [(entry.key, entry.value) for entry in self.table\n if entry.value is not None]", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def dicts(self, value=None):\n if value is None:\n return [dict(zip(self.keys, line)) for line in self.data]\n return [dict(zip(self.keys, line)) for line in self.data if value in line]", "def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())", "def getall(self, key):\n return self.values.get(key, [])", "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def entries():\n\n\treturn [entry.value for entry in db.session.query(Entry).all()]", "def keys(self):\n return [kvp.key for kvp in self.keyvaluepair_set.all()]", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def itervalues(self):\n return iter(kvp.value for kvp in self.keyvaluepair_set.all())", "def filter_by_keys(self, keys):\n return list(filter(lambda item: item.keyword in set(keys), self._metadata))", "def list_values(key):\n return meta.list_values(key=key)", "def entrySet(self):\n set = HashSet()\n for key in keySet():\n set.add(Entry(key, self.get(key)))\n return set", "def values(self):\n return [self[k] for k in self.keys()]", "def values(self):\n return [self[name] for name in self.keys()]", "def keys(self):\r\n return [k for k in self]", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def items(self):\n # Collect all pairs of key-value entries in each bucket\n all_items = []\n for bucket in self.buckets:\n all_items.extend(bucket.items())\n return all_items", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def exclusively(self, keys, lst=None):\n minimal = self.minimal() if lst is None else lst\n\n def make_exclusive(d, keys):\n dct = {}\n for k in keys:\n if k in d:\n dct[k] = d[k]\n else:\n dct[k] = -999\n return dct\n\n lst = []\n for d in minimal:\n dct = make_exclusive(d, keys)\n if len(dct) > 0:\n lst.append(dct)\n return lst", "def items(self):\n return [(k, self[k]) for k in self.keys()]", "def values(self):\r\n return [self[k] for k in self]", "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def iterkeys(self):\n return iter(kvp.key for kvp in self.keyvaluepair_set.all())", "def keys(self):\n return [entry.key for entry in self.table if entry.value is not None]", "def all(self):\n self.scan()\n return self.entries", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def key_list(dict):\n list = []\n for key in dict:\n list.append(key)\n return list", "def exact_key_items(self):\n for key_node, value in 
self.get_tree_entries():\n for key in self._defining_context.infer_node(key_node):\n if is_string(key):\n yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)", "def keySet (map):\n ltset = lt.newList()\n for pos in range(lt.size(map['table'])):\n entry = lt.getElement (map['table'], pos+1)\n if (entry['key']!=None and entry['key']!='__EMPTY__'):\n lt.addLast (ltset, entry['key'])\n return ltset", "def values(self):\n return [_ for _ in self._dict.values()]", "def items(self):\n return ((key, value) for (key, value) in zip(self.__keys, self.__vals))", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def items(self):\n acc = []\n for k in self.keys():\n pm = self._maps[k]\n acc.append((k,pm))\n return acc", "def collect(self, *keys):\n items = []\n for key in keys:\n if key.endswith(b\"*\"):\n key, explode = key[:-1], True\n else:\n explode = False\n if b\":\" in key:\n key, max_length = key.partition(b\":\")[0::2]\n max_length = int(max_length)\n else:\n max_length = None\n value = self.values.get(key)\n if isinstance(value, dict):\n if not value:\n items.append((key, None))\n elif explode:\n items.extend((key, _) for _ in value.items())\n else:\n items.append((key, value))\n elif isinstance(value, (tuple, list)):\n if explode:\n items.extend((key, _) for _ in value)\n else:\n items.append((key, list(value)))\n elif max_length is not None:\n items.append((key, value[:max_length]))\n else:\n items.append((key, value))\n return [(key, value) for key, value in items if value is not None]", "def items(self):\n x = []\n for k in list(self.keys()):\n x.append((k, self[k]))\n return x", "def collect_by_key(pair_iter):\n out = {}\n for (k, v) in pair_iter:\n out[k] = out.get(k, [])\n out[k].append(v)\n return out", "def get_list(self):\n return sorted(self.__entries.keys())", "def items(self):\n return list(zip(self.keys(), self.values()))", "def items(self):\n return list(zip(self.keys(), self.values()))", "def filter_record_keys(record_list, whitelist_keys):\n\n filtered = [{k: v for k, v in [y for y in list(x.items()) if y[0] in whitelist_keys]} for x in record_list]\n return filtered", "def items(self):\r\n return [(k, self[k]) for k in self]", "def items(self, *args, **kwargs):\n return [ (key, self._get(key, *args, **kwargs),) for key in self.keys(*args, **kwargs) ]", "def keys(self):\n return [ x for x in self ]", "def grab_keys(self,\r\n entrylist,\r\n all_caps=True,\r\n first_caps=True):\r\n\r\n returnkeys = set()\r\n for a_temp in entrylist:\r\n returnkeys = returnkeys.union(self.get_keys_from_note(a_temp))\r\n returnlist = [k_temp for k_temp in returnkeys\r\n if (all_caps\r\n or k_temp != k_temp.upper())\r\n and (first_caps\r\n or k_temp[0]+k_temp[1:]\r\n != k_temp[0].upper()+k_temp[1:])]\r\n return returnlist", "def keypairs(self):\n return list(self._list(_keypair.Keypair, paginated=False))", "def values(self):\n # TODO: Collect all values in each of the buckets\n all_values = [] # Will store all the key\n\n for bucket in self.buckets:\n for value in bucket:\n if value is not None:\n all_values.append(value[1])\n return all_values", "def by_label_contains(self, value):\n return {k: v for k, v in self.items() if value in k}", "def key_value_list(d):\n if not isinstance(d, dict) and not isinstance(d, list):\n return []\n\n key_values = []\n\n if isinstance(d, list):\n for entry in d:\n if isinstance(entry, dict):\n key_values.extend(key_value_list(entry))\n else:\n for k, v in 
d.items():\n if k is None or v is None:\n continue\n\n key_values.append((k, v))\n key_values.extend(key_value_list(v))\n\n return key_values", "def key_value_list(d):\n if not isinstance(d, dict) and not isinstance(d, list):\n return []\n\n key_values = []\n\n if isinstance(d, list):\n for entry in d:\n if isinstance(entry, dict):\n key_values.extend(key_value_list(entry))\n else:\n for k, v in d.items():\n if k is None or v is None:\n continue\n\n key_values.append((k, v))\n key_values.extend(key_value_list(v))\n\n return key_values", "def values(self, *args, **kwargs):\n return [ self._get(doc, *args, **kwargs) for doc in self.keys(*args, **kwargs) ]", "def tag_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.tag_dict.values()", "def _key_vals(dict_):\n return [(key, val) for key, val in dict_.iteritems()]", "def values(self):\n x = []\n for k in list(self.keys()):\n x.append(self[k])\n return x", "def get_values(self):\n return set(self._table.keys())", "def get_ssh_entries(kdb):\n entries = kdb.entries\n entries = [try_parse_ssh_entry(kdb,e) for e in entries]\n entries = [e for e in entries if e]\n return entries", "def AllKeys(self) -> _n_0_t_1[str]:", "def get_key_values(self):\n return self.key_values", "def get_all_dicts_by_key(pcb_data: List[Dict[str, Any]], key: str) -> List[Dict[str, Any]]:\n res: List[Dict[str, Any]] = list()\n for d in pcb_data:\n if isinstance(d, dict) and key in d.keys():\n res.append(d)\n return res", "def get_all_sets(config: Dict) -> List:\n return [x for x, y in config.items() if y[\"type\"] == \"set\"]", "def values(self):\n values = []\n for key in self.keys():\n values.append(self[key])\n return values", "def all(self):\n return list(six.iteritems(self))", "def list(aMap):\n\tfor bucket in aMap:\n\t\tif bucket:\n\t\t\tfor k, v in bucket:\n\t\t\t\tprint k, v", "def keys(self):\n return [key for key, value in self.items()]", "def filtered(self, keys, lst=None, func=\"all\"):\n lst = self if lst is None else lst\n if len(lst) == 0:\n raise ValueError(\"No rows in list\")\n return [row for row in self.filter(keys, lst, func=func)]", "def get_dictionary_items(self,dictionary):\r\n ret = []\r\n for key in self.get_dictionary_keys(dictionary):\r\n ret.extend((key,dictionary[key]))\r\n return ret", "def valueSet(map):\n ltset = lt.newList()\n for pos in range(lt.size(map['table'])):\n entry = lt.getElement (map['table'], pos+1)\n if (entry['value']!=None and entry['value']!='__EMPTY__'):\n lt.addLast (ltset, entry['value'])\n return ltset", "def all(self, return_tuples=False):\n if return_tuples:\n return [(key, value) for (key, value) in self._store.items() if not key.startswith(\"_\")]\n return [key for key in self._store if not key.startswith(\"_\")]", "def _get_all_credential_keys(self):\n return [dict(key) for key in self._data.keys()]", "def keysAll():", "def list(aMap):\n\tfor bucket in aMap:\n\t\tif bucket:\n\t\t\tfor k,v in bucket:\n\t\t\t\tprint k,v", "def have(keylist, dic):\n return all(key in dic and dic[key] for key in keylist)", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n 
fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.key_dict.values()", "def get_values(self) -> list:\r\n values = []\r\n for key, value in self._items:\r\n values.append(value)\r\n return values", "def get_dictionary_values(self,dictionary):\r\n return [dictionary[k] for k in self.get_dictionary_keys(dictionary)]", "def get_duplicates(lines):\n duplicates = []\n keys_checked = {}\n for line in lines:\n key, value = get_key_and_value_from_line(line=line)\n if key:\n if key in keys_checked:\n duplicates.append(u\"{key}={value}\".format(key=key, value=value))\n translation_in_list = u\"{key}={value}\".format(key=key, value=keys_checked[key])\n if translation_in_list not in duplicates:\n duplicates.append(translation_in_list)\n else:\n keys_checked[key] = value\n return duplicates", "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n results.append(string)\n return results", "def get_entries_for_topic(cls, topic, entry_id_list):\n\t\tresults = cls.get([cls.create_key(topic, entry_id)\n\t\t\t\t\t\t\t\t\t\t\t for entry_id in entry_id_list])\n\t\t# Filter out those pesky Nones.\n\t\treturn [r for r in results if r]", "def all(self):\r\n return self.attr_dict.keys()", "def keys(self) -> List:\n pass", "def get_dicts(self, clean=False):\n return list(self.iter_dicts(clean=clean))", "def readentries(self):\n return list(x for x in self)", "def get_hcr_records(self):\r\n result = []\r\n for setting in self.settings:\r\n record = HcrRecord(setting.type, setting.value, self.category_uid, setting.id)\r\n flag = setting.flag\r\n if flag:\r\n record.flags = 0\r\n if flag.Uninitialised == '1': record.flags |= HcrRecord.FLAG_UNINITIALIZED\r\n if flag.Modifiable == '1': record.flags |= HcrRecord.FLAG_MODIFIABLE\r\n if flag.Persistent == '1': record.flags |= HcrRecord.FLAG_PERSISTENT\r\n result.append(record)\r\n return result", "def check(self, entry_type:str, x:str):\n keys = set()\n x = self._decode(x)\n\n for log in self.logs:\n for datum in log[entry_type]:\n res = self._follow(datum, x)\n\n if type(res) == dict:\n for key in res.keys():\n keys.add(key)\n elif type(res) == list:\n keys.add('< %d' % len(res))\n \n return list(keys)", "def has(self, key=None, category=None):\n ret = []\n category = category.strip().lower() if category is not None else None\n for keystr in make_iter(key):\n keystr = key.strip().lower()\n ret.extend(bool(attr) for attr in self._getcache(keystr, category))\n return ret[0] if len(ret) == 1 else ret", "def search_all_available_pair(self):\n return self._search({})", "def get_keys(my_dict, val):\n keys=[]\n for key, value in my_dict.items():\n if val == value:\n keys.append(key)\n return keys", "def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result", "def get_keys(self, ckey=None):\n if ckey:\n keys = self._get_keys(ckey)\n else:\n keys = self.keys()\n for key in self.keys():\n keys += [k for k in self._get_keys(key)]\n return list(set(keys))", "def list_all_keys(self):\n \n return self.keys", "def get_all_keys(self):\r\n all_keys = []\r\n for i in range(len(self.hash_table)):\r\n if self.hash_table[i] is not None:\r\n 
all_keys.append(self.hash_table[i].key)\r\n return all_keys", "def dict_filter(indict, key_list):\n \n return dict((key, value) for key, value in list(indict.items()) if key in key_list)", "def build_list(self, word_list):\n # Get frequency list for keys\n freq = word_list.groupby('key').agg('count')\n # Filter out only keys with greater or equal frequency to length\n key_list = freq.loc[freq['word'] >= freq.index.str.len()]\n return key_list" ]
[ "0.7040385", "0.6558992", "0.6447182", "0.6341448", "0.63186115", "0.6311516", "0.6185393", "0.6151298", "0.6077109", "0.60603917", "0.6041254", "0.6039591", "0.60185647", "0.6014903", "0.5886663", "0.5880148", "0.5867619", "0.5864501", "0.58568406", "0.58568406", "0.58568406", "0.5855485", "0.58487064", "0.58487064", "0.5812522", "0.58099645", "0.579661", "0.5775973", "0.57630855", "0.5759569", "0.5755531", "0.5749328", "0.5740005", "0.5740005", "0.57357943", "0.5724324", "0.57228214", "0.5718379", "0.5712194", "0.5704673", "0.56974846", "0.56731266", "0.56690377", "0.5624057", "0.56187415", "0.5618149", "0.56179893", "0.56179893", "0.56120026", "0.56020015", "0.5600307", "0.5592132", "0.55909175", "0.55896664", "0.5589554", "0.5580481", "0.5580089", "0.5580089", "0.5579365", "0.5568866", "0.5561882", "0.5557033", "0.5548242", "0.554731", "0.5546615", "0.5526279", "0.5523697", "0.5519995", "0.5506497", "0.5503187", "0.55009645", "0.549133", "0.5490334", "0.54902613", "0.5490041", "0.54739463", "0.5472482", "0.5472327", "0.54614824", "0.54525787", "0.5451949", "0.54504466", "0.5448166", "0.5444894", "0.5440065", "0.54332423", "0.5425091", "0.5418091", "0.541442", "0.5398755", "0.5397763", "0.53955036", "0.539345", "0.5392027", "0.5390099", "0.5374703", "0.53693086", "0.5368174", "0.5365062", "0.536316", "0.5362927" ]
0.0
-1
A minimal list of data in the Box
def minimal(self):
    combined = self._combined
    out = []
    for k, d in combined.items():
        dct = d[INDEP].copy()
        dct.update(d[DEP])
        out.append(dct)
    return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.data = []\n self.min = None", "def __init__(self):\n self.data = []\n self.min = []\n self.data_ptr = -1\n self.min_ptr = -1", "def __init__(self):\n self.items = []\n self.min = []", "def __init__(self):\n self.data = []\n self.min = sys.maxsize", "def store_empty_graphic_box(self):\n for box in self.laby.empty_box():\n x = box[0] * 40\n y = box[1] * 40\n self.store_emptyBox.append((y, x))\n return self.store_emptyBox", "def __init__(self):\n self.small = []\n self.large = []", "def __init__(self, is_min=True):\n self._data = []\n self.min = is_min", "def base_boxes():\n return sorted(list(set([name for name, provider in _box_list()])))", "def _init_empty(self):\n self._data = []", "def __init__(self):\n self.small = [] \n self.large = []", "def __init__(self):\r\n self.items = []\r\n self.minElement = 0", "def __init__(self):\n self.small = []\n self.big = []", "def get_boxes(self):\r\n\r\n boxes = [(\" \", self.worldbox.tl, self.worldbox.br)]\r\n# boxes = []\r\n boxes += [(\".\", b.tl, b.br) for b in self.wallboxes]\r\n boxes += [(\"x\", b.tl, b.br) for b in self.targetboxes]\r\n agentscale = 100\r\n boxes += [(\"a\", (self.i_state[0] - self.dx * agentscale, self.i_state[1] - self.dx * agentscale),\r\n (self.i_state[0] + self.dx * agentscale, self.i_state[1] + self.dx * agentscale))]\r\n return boxes", "def __init__(self):\r\n self.data = PositionalList()", "def make_free_cell_list():\r\n for row in range(9):\r\n for col in range(9):\r\n if (application.ui.__getattribute__(f'cell{col+1}{row+1}')).text() == \"\":\r\n lst_free_cells.append(Point(row, col))", "def all_minimal():\n results = DatalabData.filter_minimal(None, None, None, False)\n return results", "def test_get_all_boxes(self, postfix_directory):\n print(\"Test_All_Boxes\")\n protein_file = os.path.join(postfix_directory, \"PfATP4.pdb\")\n ligand_file = os.path.join(postfix_directory, \"SJ733.pdb\")\n coords = rdkit_util.load_molecule(protein_file)[0]\n\n boxes = dc.dock.binding_pocket.get_all_boxes(coords)\n assert isinstance(boxes, list)\n # Pocket is of form ((x_min, x_max), (y_min, y_max), (z_min, z_max))\n for pocket in boxes:\n assert len(pocket) == 3\n assert len(pocket[0]) == 2\n assert len(pocket[1]) == 2\n assert len(pocket[2]) == 2\n (x_min, x_max), (y_min, y_max), (z_min, z_max) = pocket\n assert x_min < x_max\n assert y_min < y_max\n assert z_min < z_max", "def FillBox(file):\n with open(file,'r') as f:\n info = f.readline().split()\n box_list = []\n id = 1\n I_list =[]\n for i in range(0,len(info)):\n box_list.append(box(float(info[i]),id,0,[]))\n id+=1\n for line in f:\n words = line.split()\n I_list.append(items(words[0], float(words[1])))\n return box_list, I_list", "def placeholder(self):\n return []", "def items():", "def list_blocks(self, _):\n print(self.data.name)", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def generate_empty_slots():\n return {\n 'colors': ['', '', '', '', ''],\n 'pets': ['', '', '', '', ''],\n 'beverages': ['', '', '', '', ''],\n 'cigarettes': ['', '', '', '', ''],\n 'nationality': ['', '', '', '', ''],\n 'numbers': ['', '', '', '', ''],\n }", "def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size", "def __init__(self):\n INF = 0x11111111111111111111111111111111\n self.data = []\n self.min = INF", "def __init__(self):\n self.container = [[-1]*1000 for _ in range(1000)]", "def __init__(self):\n self.items=[]\n 
self.min=float('INF')", "def boxes(self) -> dict:\n return self.data[\"boxes\"]", "def __init__(self):\n # 最小堆\n # 放着的是数据流中 ”较大的一半“\n # 不需要对值进行额外操作\n self.small = []\n\n # 最大堆\n # 放着的是数据流中 ”较小的一半“\n # 需要对值 去负数来实现最大堆\n self.large = []", "def MINET(self):", "def __init__(self, width = 7, height = 7):\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def populate_box(box):\n missing = []\n [missing.append(z) for z in range(16) if z not in np.reshape(box, 16)]\n for i in range(4):\n for j in range(4):\n if box[i, j] == 16:\n box[i, j] = missing.pop()", "def basic_data(self) -> list[dict]:\n miner_data = []\n for miner in self.miners:\n miner_data.append({'IP': miner, \"text\": \"\"})\n return miner_data", "def _minimal_vlist(self):\n vlist = list()\n if self.dataset_id == \"phy\" or self.dataset_id == \"bgc\":\n plist = [\n \"data_mode\",\n \"latitude\",\n \"longitude\",\n \"position_qc\",\n \"time\",\n \"time_qc\",\n \"direction\",\n \"platform_number\",\n \"cycle_number\",\n \"config_mission_number\",\n \"vertical_sampling_scheme\",\n ]\n [vlist.append(p) for p in plist]\n\n plist = [\"pres\", \"temp\", \"psal\"]\n if self.dataset_id == \"bgc\":\n plist = [\"pres\", \"temp\", \"psal\", \"doxy\"]\n [vlist.append(p) for p in plist]\n [vlist.append(p + \"_qc\") for p in plist]\n [vlist.append(p + \"_adjusted\") for p in plist]\n [vlist.append(p + \"_adjusted_qc\") for p in plist]\n [vlist.append(p + \"_adjusted_error\") for p in plist]\n\n elif self.dataset_id == \"ref\":\n plist = [\"latitude\", \"longitude\", \"time\", \"platform_number\", \"cycle_number\"]\n [vlist.append(p) for p in plist]\n plist = [\"pres\", \"temp\", \"psal\", \"ptmp\"]\n [vlist.append(p) for p in plist]\n\n return vlist", "def get_boxes(self) -> List[Box]:\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]", "def remove_small_boxes(boxlist, min_size):\n # TODO maybe add an API for querying the ws / hs\n xywh_boxes = boxlist.convert(\"xywh\").bbox\n _, _, ws, hs = xywh_boxes.unbind(dim=1)\n keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1)\n return boxlist[keep]", "def getFirstData(self) -> ghidra.program.model.listing.Data:\n ...", "def create_list() -> List[Optional[float]]:\n return [None] * num_stations", "def _get_boxes(self):\n return self._boxes", "def __init__(self, vallist=[]):\n self.data = vallist[:]\n self.size = len(self.data)", "def __init__(self, contents=()):\n self. 
data = [ self._Item(k,v) for k,v in contents ] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def __init__(self):\n self.stack = []\n self.min_elements = []", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []", "def __init__(self):\n self.empty_seats = [row * 8 + col for row in self.rows for col in self.cols]", "def __init__(self):\n self.data_stack = []\n self.min_stack = []", "def one_basis(self):\n return self._kbounded_partitions([])", "def __init__(self):\n self.min_list, self.stack = [], []", "def load_boxes(self, data):\r\n\r\n # worldbox represents the total map area\r\n self.worldbox = self.Box((0, 0), (len(data[0]) * self.cellwidth, len(data) * self.cellwidth))\r\n\r\n # create a box corresponding to each character/cell in the map file\r\n tl_x = 0\r\n tl_y = 0\r\n for row in data:\r\n for cell in row:\r\n if cell == \".\":\r\n self.wallboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n elif cell == \"x\":\r\n self.targetboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n tl_x += self.cellwidth\r\n tl_x = 0\r\n tl_y += self.cellwidth", "def mezclar_bolsa(self):", "def items(self):", "def __init__(self):\n list.__init__([])\n self.name = ''\n self.ideal_value = None", "def __init__(self, rows, columns, fillValue = None):\n self.data = []\n for row in range(rows):\n dataInRow = []\n for column in range(columns):\n dataInRow.append(fillValue)\n self.data.append(dataInRow)", "def __init__(self):\n self._data = PositionalList()", "def __init__(self):\n self._data = PositionalList()", "def __init__(self):\n self.datastack = []\n self.minstack = []", "def __init__(self, box=[], idx=None):\n self.box: List[int] = box\n self.class_id: int = idx", "def _get_initial_slots(self, rows, cols) -> list:\n slots = []\n for x in range(rows):\n row = []\n for y in range(cols):\n slot = Slot(x=x, y=y, mine=False, available=True, flag=False)\n row.append(slot)\n slots.append(row)\n return slots", "def _available_boxes(self, graph):\n return sorted([node.name for node in graph.available_nodes()\n if not isinstance(node.meta, Ibox)])", "def __init__(self, min_cables):\n self.num_gearboxes = 1\n self.min_cables = min_cables\n first_gearbox_name = ''.join(('gearbox_', str(self.num_gearboxes - 1)))\n self.gearboxes = [gearbox.Gearbox(self.min_cables, \n name=first_gearbox_name)]\n self.cables_per_gearbox = self.gearboxes[0].max_cables\n self.bundles_per_gearbox = self.gearboxes[0].max_bundles\n self.gearbox_added = False\n self.surprise_history = []\n self.recent_surprise_history = [0.] 
* 100", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def dataPrep(mydata: list) -> list:\n mylist = [int(elm) for elm in mydata]\n\n volt = int(max(mylist)) + 3\n start = 0\n\n mylist.extend([volt, start])\n mylist.sort()\n\n return mylist", "def __init__(self, data=None):\n self.head = None \n if data is not None:\n for value in data:\n self.append(value)", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def list():", "def list():", "def __init__(self, contents=()):\n self._data = [self._Item(k, v) for k,v in contents] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def extract_labels_full(scene: \"Scenemaker\") -> List[Tuple[int, np.ndarray]]:\r\n objects = utils.select_collection(scene.target_collection)\r\n boxes_list = []\r\n\r\n for obj in objects:\r\n objclass = obj.name.split(\".\")[0]\r\n dim = obj.dimensions\r\n rot = normalize_rotations(np.array(obj.rotation_euler))\r\n loc = change_to_spawnbox_coords(np.array(obj.location))\r\n boxes_list.append((scene.name2num[objclass], np.concatenate((loc, dim, rot))))\r\n\r\n return boxes_list", "def full(self):\n return [cell for cell in self.compact if cell.peg]", "def __init__(self):\r\n self._items = [[] for _ in range(20)]", "def __init__(self, initial_exclusion_list=None):\n self.exclusion_list = BoxHolder()\n if initial_exclusion_list is not None: # add initial list\n for initial in initial_exclusion_list:\n self.exclusion_list.add_box(initial)", "def __init__(self):\n self._data=[]", "def test_fill(self):\r\n for _ in range(SIZE):\r\n self.nb.add(_)\r\n\r\n self.assertFalse(self.nb.isEmpty())\r\n self.assertTrue(self.nb.isFull())\r\n self.assertEqual(5, len(self.nb))", "def create_prior_boxes(self):\n # value of k for each feature map to create k^2 boxes for each feature map\n feature_map_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5}\n\n # scale for boxes across different feature maps. boxes for inner feature maps\n # are scaled much lower to detect small objects\n obj_scales = {'conv4_3': 0.1, 'conv7': 0.21, 'conv8_2': 0.255, 'conv9_2': 0.30}\n\n # Defined aspect ratio calculated from mean of (w/h) across all bounding boxes\n # from the dataset. The mean is 0.66 with deviation of 0.07. 
So aspect ratio is kept\n # at 0.66 for all feature maps\n aspect_ratios = {'conv4_3': [0.5], 'conv7': [0.55], 'conv8_2': [0.6], 'conv9_2': [.66]}\n\n fmaps = list(feature_map_dims.keys())\n prior_boxes = []\n for k, fmap in enumerate(fmaps):\n # for each feature map, create k*k boxes\n for i in range(feature_map_dims[fmap]):\n for j in range(feature_map_dims[fmap]):\n # calculate center coordinates of boxes\n cx = (j + 0.5) / feature_map_dims[fmap]\n cy = (i + 0.5) / feature_map_dims[fmap]\n\n # For each\n for ratio in aspect_ratios[fmap]:\n prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])\n\n prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (1930, 4)\n prior_boxes.clamp_(0, 1) # (1930, 4)\n\n return prior_boxes", "def minimal(self) -> typing.Tuple[str, ...]:\n return self.lattice._context._minimal(self._extent,\n self._intent).members()", "def __init__(self):\n self.capacity = 1000\n self.data = [None]*self.capcity", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\r\n self.stack = []\r\n self.minEle = 2**31 - 1", "def empty():\n return CAT([], 0, 0, active=False)", "def __init__(self, size = 0):\n self.data = []\n self.size = size", "def __init__(self):\n self.arr = []\n self.min = 9999", "def __init__(self):\n self.stack = []\n self.min_vals = []", "def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes", "def create_boxes(plot_data, size=1):\n fixed_boxes = plot_data.buffer(size).envelope\n \n fixed_boxes = gpd.GeoDataFrame(geometry=fixed_boxes)\n \n #Mimic the existing structure\n fixed_boxes = gpd.sjoin(fixed_boxes, plot_data)\n fixed_boxes[\"score\"] = None\n fixed_boxes[\"label\"] = \"Tree\" \n fixed_boxes[\"xmin\"] = None \n fixed_boxes[\"xmax\"] = None\n fixed_boxes[\"ymax\"] = None\n fixed_boxes[\"ymin\"] = None\n \n fixed_boxes[\"box_id\"] = fixed_boxes.index.to_series().apply(lambda x: \"fixed_box_{}\".format(x))\n \n return fixed_boxes", "def create_boxes(plot_data, size=1):\n fixed_boxes = plot_data.buffer(size).envelope\n \n fixed_boxes = gpd.GeoDataFrame(geometry=fixed_boxes)\n \n #Mimic the existing structure\n fixed_boxes = gpd.sjoin(fixed_boxes, plot_data)\n fixed_boxes[\"score\"] = None\n fixed_boxes[\"label\"] = \"Tree\" \n fixed_boxes[\"xmin\"] = None \n fixed_boxes[\"xmax\"] = None\n fixed_boxes[\"ymax\"] = None\n fixed_boxes[\"ymin\"] = None\n \n fixed_boxes[\"box_id\"] = fixed_boxes.index.to_series().apply(lambda x: \"fixed_box_{}\".format(x))\n \n return fixed_boxes", "def union_boxes(boxes: List[\"Box\"]) -> \"Box\":\n left, top, right, bottom = float(\"inf\"), float(\"inf\"), float(\"-inf\"), float(\"-inf\")\n for box in boxes:\n l, t, r, b = box.coordinates\n left = min(left, l)\n top = min(top, t)\n right = max(right, r)\n bottom = max(bottom, b)\n return Box(left, top, right - left, bottom - top)", "def __init__(self):\n self.list_x = []", "def AutoBox(\n widgets: [Gtk.Widget], vspacing: int = 10, hspacing: int = 10,\n orientation: Gtk.Orientation = Gtk.Orientation.VERTICAL) -> 
Gtk.Box:\n # {{{\n\n box = Gtk.Box.new(\n orientation,\n vspacing if orientation == Gtk.Orientation.VERTICAL else hspacing\n )\n\n sub_orientation = 1 - orientation\n for x in widgets:\n if isinstance(x, list):\n x = AutoBox(x, vspacing, hspacing, sub_orientation)\n\n if len(widgets) == 1:\n return x\n\n if isinstance(x, Gtk.Widget):\n box.pack_start(x, True, True, 0)\n\n if not box.get_children():\n return None\n\n return box\n # }}}", "def fill_empty_geomean_vals(product):\n\theaders = ['chemaxon', 'epi', 'test', 'sparc', 'geomean', 'measured']\n\tfor prop_data_list in product['data']:\n\t\tif len(prop_data_list) < len(headers):\n\t\t\tprop_data_list.append('')\n\treturn product", "def not_empty(entry):\n gt_boxes = entry['boxes']\n return gt_boxes.shape[0] > 0", "def __init__(self):\n self.data = [[] for i in range(self._MOD)]", "def _just_a_box():\n {\n \"pgf\": {\n \"texsystem\": \"\", \n \"debug\": \"\", \n \"rcfonts\": \"\", \n \"preamble\": \"\"\n }, \n \"verbose\": {\n \"level\": \"\", \n \"fileo\": \"\"\n }, \n \"figure\": {\n \"facecolor\": \"\", \n \"titlesize\": \"\", \n \"titleweight\": \"\", \n \"figsize\": \"\", \n \"max_open_warning\": \"\", \n \"edgecolor\": \"\", \n \"dpi\": \"\", \n \"frameon\": \"\", \n \"autolayout\": \"\"\n }, \n \"savefig\": {\n \"transparent\": \"\", \n \"facecolor\": \"\", \n \"pad_inches\": \"\", \n \"orientation\": \"\", \n \"format\": \"\", \n \"jpeg_quality\": \"\", \n \"directory\": \"\", \n \"edgecolor\": \"\", \n \"dpi\": \"\", \n \"frameon\": \"\", \n \"bbox\": \"\"\n }, \n \"text\": {\n \"color\": \"\", \n \"antialiased\": \"\", \n \"hinting\": \"\", \n \"hinting_factor\": \"\", \n \"usetex\": \"\"\n }, \n \"image\": {\n \"resample\": \"\", \n \"cmap\": \"\", \n \"composite_image\": \"\", \n \"interpolation\": \"\", \n \"lut\": \"\", \n \"aspect\": \"\", \n \"origin\": \"\"\n }, \n \"examples\": {\n \"directory\": \"\"\n }, \n \"axes3d\": {\n \"grid\": \"\"\n }, \n \"font\": {\n \"fantasy\": \"\", \n \"monospace\": \"\", \n \"weight\": \"\", \n \"serif\": \"\", \n \"family\": \"\", \n \"stretch\": \"\", \n \"variant\": \"\", \n \"cursive\": \"\", \n \"style\": \"\", \n \"size\": \"\"\n }, \n \"contour\": {\n \"corner_mask\": \"\", \n \"negative_linestyle\": \"\"\n }, \n \"backend\": {\n \"qt4\": \"\", \n \"qt5\": \"\"\n }, \n \"ps\": {\n \"useafm\": \"\", \n \"papersize\": \"\", \n \"usedistiller\": \"\", \n \"fonttype\": \"\"\n }, \n \"axes\": {\n \"labelweight\": \"\", \n \"facecolor\": \"\", \n \"axisbelow\": \"\", \n \"titlesize\": \"\", \n \"titleweight\": \"\", \n \"labelpad\": \"\", \n \"prop_cycle\": \"\", \n \"ymargin\": \"\", \n \"labelcolor\": \"\", \n \"unicode_minus\": \"\", \n \"hold\": \"\", \n \"autolimit_mode\": \"\", \n \"linewidth\": \"\", \n \"xmargin\": \"\", \n \"edgecolor\": \"\", \n \"titlepad\": \"\", \n \"labelsize\": \"\", \n \"grid\": \"\"\n }, \n \"markers\": {\n \"fillstyle\": \"\"\n }, \n \"hist\": {\n \"bins\": \"\"\n }, \n \"polaraxes\": {\n \"grid\": \"\"\n }, \n \"animation\": {\n \"convert_path\": \"\", \n \"frame_format\": \"\", \n \"embed_limit\": \"\", \n \"html\": \"\", \n \"html_args\": \"\", \n \"avconv_args\": \"\", \n \"codec\": \"\", \n \"bitrate\": \"\", \n \"ffmpeg_args\": \"\", \n \"ffmpeg_path\": \"\", \n \"convert_args\": \"\", \n \"writer\": \"\", \n \"avconv_path\": \"\"\n }, \n \"tk\": {\n \"window_focus\": \"\"\n }, \n \"hatch\": {\n \"color\": \"\", \n \"linewidth\": \"\"\n }, \n \"boxplot\": {\n \"bootstrap\": \"\", \n \"patchartist\": \"\", \n \"meanline\": \"\", \n \"vertical\": 
\"\", \n \"showfliers\": \"\", \n \"showbox\": \"\", \n \"notch\": \"\", \n \"showmeans\": \"\", \n \"whiskers\": \"\", \n \"showcaps\": \"\"\n }, \n \"docstring\": {\n \"hardcopy\": \"\"\n }, \n \"errorbar\": {\n \"capsize\": \"\"\n }, \n \"xtick\": {\n \"direction\": \"\", \n \"labelbottom\": \"\", \n \"alignment\": \"\", \n \"labeltop\": \"\", \n \"color\": \"\", \n \"bottom\": \"\", \n \"top\": \"\", \n \"labelsize\": \"\"\n }, \n \"ytick\": {\n \"direction\": \"\", \n \"right\": \"\", \n \"alignment\": \"\", \n \"color\": \"\", \n \"labelright\": \"\", \n \"labelleft\": \"\", \n \"left\": \"\", \n \"labelsize\": \"\"\n }, \n \"grid\": {\n \"alpha\": \"\", \n \"color\": \"\", \n \"linewidth\": \"\", \n \"linestyle\": \"\"\n }, \n \"mathtext\": {\n \"it\": \"\", \n \"fontset\": \"\", \n \"default\": \"\", \n \"tt\": \"\", \n \"cal\": \"\", \n \"sf\": \"\", \n \"bf\": \"\", \n \"rm\": \"\", \n \"fallback_to_cm\": \"\"\n }, \n \"path\": {\n \"simplify\": \"\", \n \"sketch\": \"\", \n \"snap\": \"\", \n \"effects\": \"\", \n \"simplify_threshold\": \"\"\n }, \n \"legend\": {\n \"shadow\": \"\", \n \"facecolor\": \"\", \n \"markerscale\": \"\", \n \"loc\": \"\", \n \"handleheight\": \"\", \n \"borderaxespad\": \"\", \n \"scatterpoints\": \"\", \n \"numpoints\": \"\", \n \"framealpha\": \"\", \n \"columnspacing\": \"\", \n \"handlelength\": \"\", \n \"fontsize\": \"\", \n \"edgecolor\": \"\", \n \"labelspacing\": \"\", \n \"frameon\": \"\", \n \"fancybox\": \"\", \n \"handletextpad\": \"\", \n \"borderpad\": \"\"\n }, \n \"svg\": {\n \"hashsalt\": \"\", \n \"image_inline\": \"\", \n \"fonttype\": \"\"\n }, \n \"lines\": {\n \"solid_capstyle\": \"\", \n \"markersize\": \"\", \n \"antialiased\": \"\", \n \"dotted_pattern\": \"\", \n \"scale_dashes\": \"\", \n \"solid_joinstyle\": \"\", \n \"color\": \"\", \n \"dashdot_pattern\": \"\", \n \"markeredgewidth\": \"\", \n \"dashed_pattern\": \"\", \n \"linewidth\": \"\", \n \"marker\": \"\", \n \"dash_joinstyle\": \"\", \n \"dash_capstyle\": \"\", \n \"linestyle\": \"\"\n }, \n \"patch\": {\n \"edgecolor\": \"\", \n \"antialiased\": \"\", \n \"facecolor\": \"\", \n \"linewidth\": \"\", \n \"force_edgecolor\": \"\"\n }, \n \"keymap\": {\n \"fullscreen\": \"\", \n \"quit\": \"\", \n \"grid_minor\": \"\", \n \"all_axes\": \"\", \n \"yscale\": \"\", \n \"quit_all\": \"\", \n \"save\": \"\", \n \"back\": \"\", \n \"zoom\": \"\", \n \"xscale\": \"\", \n \"home\": \"\", \n \"pan\": \"\", \n \"forward\": \"\", \n \"grid\": \"\"\n }, \n \"webagg\": {\n \"port_retries\": \"\", \n \"address\": \"\", \n \"open_in_browser\": \"\", \n \"port\": \"\"\n }, \n \"pdf\": {\n \"use14corefonts\": \"\", \n \"compression\": \"\", \n \"inheritcolor\": \"\", \n \"fonttype\": \"\"\n }, \n \"scatter\": {\n \"marker\": \"\"\n }\n }", "def fillbox(self,event=None):\n \n pass", "def provide_data(self):\n return [(k, v.shape) for k, v in self.data]", "def checkio(data):\n res_lst = []\n print(\"\")\n print(\"test data: \", data)\n left = findLeftBottomPoint(data)\n print(f\"Left point is {left}\")\n fill_points(data, left, res_lst)\n print(\"result list: \", res_lst)\n return res_lst", "def empty(self):" ]
[ "0.6248323", "0.60343033", "0.58595824", "0.5818633", "0.5777445", "0.57357603", "0.5732781", "0.57044137", "0.5676265", "0.56664705", "0.5627507", "0.55881584", "0.558191", "0.5553013", "0.5534371", "0.55328286", "0.5518715", "0.54862744", "0.5479372", "0.54552704", "0.54142094", "0.53871405", "0.53761846", "0.5376075", "0.5369254", "0.5366624", "0.5358046", "0.5351717", "0.53428143", "0.53379685", "0.5318422", "0.53091747", "0.5308962", "0.5305304", "0.53003585", "0.5296634", "0.52897394", "0.5286306", "0.5285513", "0.52691716", "0.52665955", "0.52612513", "0.52597773", "0.52597773", "0.52545244", "0.52522093", "0.5244685", "0.5242657", "0.52413625", "0.5235453", "0.523403", "0.5225031", "0.5223689", "0.5216448", "0.5216448", "0.5214329", "0.5212145", "0.52005017", "0.5198168", "0.5188725", "0.51834685", "0.51766515", "0.51741993", "0.51729614", "0.5170885", "0.5170885", "0.5170114", "0.5159649", "0.5153457", "0.5151612", "0.5147316", "0.51388925", "0.51386434", "0.51385427", "0.51315874", "0.5123877", "0.51236796", "0.51236796", "0.51236796", "0.51236796", "0.51236796", "0.51236796", "0.5121954", "0.5120174", "0.5110535", "0.5099242", "0.5096564", "0.50789404", "0.5067703", "0.5067703", "0.50642735", "0.50619394", "0.50591457", "0.50571936", "0.5045907", "0.50456274", "0.5041961", "0.50385594", "0.50344324", "0.50305045", "0.502738" ]
0.0
-1
Return a list of dictionaries that only contain values for keys
def exclusively(self, keys, lst=None): minimal = self.minimal() if lst is None else lst def make_exclusive(d, keys): dct = {} for k in keys: if k in d: dct[k] = d[k] else: dct[k] = -999 return dct lst = [] for d in minimal: dct = make_exclusive(d, keys) if len(dct) > 0: lst.append(dct) return lst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_filter(indict, key_list):\n \n return dict((key, value) for key, value in list(indict.items()) if key in key_list)", "def _filter_keys(item, keys):\n return dict((k, v) for k, v in item.iteritems() if k in keys)", "def exclude(m, keys):\n return {k: v for k, v in m.items() if k not in keys}", "def _filter_keys(d: dict, keys: set) -> dict:\n return {key: d[key] for key in keys if key in d}", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n return [{key: value for key, value in dic.items() if key not in redundant_keys} for dic in data]", "def filter_record_keys(record_list, whitelist_keys):\n\n filtered = [{k: v for k, v in [y for y in list(x.items()) if y[0] in whitelist_keys]} for x in record_list]\n return filtered", "def _pick(d, keys):\n return {k: v for k, v in d.items() if k in keys}", "def dicts(self, value=None):\n if value is None:\n return [dict(zip(self.keys, line)) for line in self.data]\n return [dict(zip(self.keys, line)) for line in self.data if value in line]", "def select_keys(my_dict: Dict, keys: Sequence) -> Dict:\n keyset = set(keys)\n return {k: v for k, v in my_dict.items() if k in keyset}", "def filter_dic_by_keys(dic,allowed_keys):\n new_dic = {}\n for key in dic:\n if key in allowed_keys:\n new_dic[key] = dic[key]\n return new_dic", "def keepers(d: dict) -> dict:\n keep = {k: v for k, v in d.items() if v is not None}\n return keep", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def unique_dicts_by_value(d, key):\n return list({v[key]: v for v in d}.values())", "def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def subset_of_dict(dict, chosen_keys):\r\n return {key: value for key, value in dict.items() if key in chosen_keys}", "def dfilter(d: dict, *keys: Iterable, reverse=False) -> dict:\n return {k: v for k, v in d.items() if k in keys and not reverse or k not in keys and reverse}", "def filter_values(function, dictionary):\n return {k: v for k, v in dictionary.items() if function(v)}", "def remove_empty_values(_dict):\n return {k: v for k, v in list(_dict.items()) if v is not None}", "def clean_dict(dictionary):\n return {k: v for k, v in dictionary.items() if v}", "def select_features(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def remove_empty_list(dictionary):\n\n return {k: v for k, v in dictionary.items() if v != []}", "def clean_dict_values(d: dict, rogue_values: list) -> dict:\n return {key: value for key, value in d.items() if not value in rogue_values}", "def filter_by_keys(self, keys):\n return list(filter(lambda item: item.keyword in set(keys), self._metadata))", "def unique_dicts(d):\n return [dict(y) for y in set(tuple(x.items()) for x in d)]", "def remove_skip_values(self, data):\n return {\n key: value for key, value in data.items()\n if value not in self.__skipvalues\n }", "def filter_valid_values(dictionary):\n return ((key, value)\n for key, value in six.iteritems(dictionary)\n if value is not None)", "def dict_filter(d, keys, into=dict):\n \n if hasattr(keys, \"__call__\"):\n f = keys\n keys = filter(f, d.keys())\n return into(map(lambda k:(k,d[k]), keys))", "def exclude_keys(dictionary: Mapping, keys: 
Sequence[Hashable]) -> dict:\n return {k: v for k, v in dictionary.items() if k not in keys}", "def get_dicts(self, clean=False):\n return list(self.iter_dicts(clean=clean))", "def compact_dict(source_dict):\n return {k: v for k, v in source_dict.items() if v is not None}", "def _filter_row_dicts(rows, field_titles):\n for row in rows:\n yield {\n key: value\n for key, value in row.items()\n if key in field_titles\n }", "def filter_dict(dictionary, pred):\n return dict((k, v) for k, v in dictionary.items() if pred(k, v))", "def nonull_dict(self):\n return {k: v for k, v in self.dict.items() if v and k != '_codes'}", "def get_all_dicts_by_key(pcb_data: List[Dict[str, Any]], key: str) -> List[Dict[str, Any]]:\n res: List[Dict[str, Any]] = list()\n for d in pcb_data:\n if isinstance(d, dict) and key in d.keys():\n res.append(d)\n return res", "def filter_output_dict(output_dict):\n global filter_ingredients\n if filter_ingredients:\n filtered_dict = {k: v for k, v in\n output_dict.iteritems() if\n all(filter_item in v['ingredients']\n for filter_item in filter_ingredients)}\n return filtered_dict\n else:\n return output_dict", "def GetDictionaryKeys(value, keys):\n return {key: value[key] for key in keys if key in value}", "def remove_duplicate_dicts(data: List[dict]) -> List[dict]:\n return [dict(y) for y in set(tuple(x.items()) for x in data)]", "def consense_dicts(dicts):\n all_keys = sorted(set(chain(*(dc for dc in dicts))))\n return {key: consense_values([dc[key] for dc in dicts if key in dc])\n for key in all_keys}", "def create_vals_no_total():\n return {key: val\n for key, val in create_values().items()\n if key != 'total'}", "def filter_dict(fdict, mask):\n\n if fdict is None:\n fdict = dict()\n\n if mask is None:\n mask = []\n\n return {k: v for (k, v) in fdict.items() if k in mask}", "def __removeDuplicateDictsFromList(self, listOfDicts: List[Dict[str, str]]) -> List[Dict[str, str]]:\n return list({frozenset(item.items()): item for item in listOfDicts}.values())", "def __filter(self, obj):\n filtered_keys = ['file_path', \"Data\", \"raw_block_data\", \"Reserved1\", \"raw\"]\n if isinstance(obj, list):\n return dict([t for t in obj if t[0] not in filtered_keys])\n elif isinstance(obj, dict):\n return {k: self.__filter(v) for k, v in obj.items()\n if k not in filtered_keys}\n else:\n return dict(obj)", "def _filter_kwargs(names, dict_):\n return {k: v for k, v in dict_.items() if k in names and v is not None}", "def non_none_dict(dikt: List[Tuple[str, Any]]) -> Dict[Any, Any]:\n return {k: v for k, v in dikt if v is not None}", "def fuse_dicts(ds):\n ks = set((k for d in ds for k in d.keys()))\n fused_d = {k: [] for k in ks}\n for d in ds:\n for k in ks:\n fused_d[k].append(d[k] if k in d else None)\n return fused_d", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "def filter_keys(data, keys=[]):\n # filter key\n for filter_key in keys:\n if filter_key in data:\n del data[filter_key]\n\n # filter sub dictionaries\n for _, value in data.items():\n if type(value) == dict:\n filter_keys(value, keys)", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def without_keys(keys):\n keys = frozenset(keys) # frozenset has efficient membership lookup\n return filter_keys_c(fnot(partial(operator.contains, keys)))", "def pick(m, *keys):\n return {k: v for k, v in m.items() if k in keys}", "def _cleanse_dict(original):\n return {k: v for k, v in original.items() if \"_pass\" 
not in k}", "def remove_by_keys(self, keys):\n return list(filter(lambda item: item.keyword not in set(keys), self._metadata))", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def filter_features(self):\n return {key: {k: v for k, v in value.items() if k in {NAME, TYPE, ACTIVE}} for key, value in self.to_dict().items()}", "def prune(bushy: dict) -> dict:\n pruned = dict()\n for key in bushy:\n if bushy[key]:\n pruned[key] = bushy[key]\n return pruned", "def filter_keys(func, a_dict):\n return dict((k, v) for (k, v) in a_dict.items() if func(k))", "def ignored_values(self):\r\n return dict()", "def only_some_keys(dic, *keys):\n ret = {}\n for key in keys:\n ret[key] = dic[key] # Raises KeyError.\n return ret", "def winnow_by_keys(dct, keys=None, filter_func=None):\n has = {}\n has_not = {}\n\n for key in dct:\n key_passes_check = False\n if keys is not None:\n key_passes_check = key in keys\n elif filter_func is not None:\n key_passes_check = filter_func(key)\n\n if key_passes_check:\n has[key] = dct[key]\n else:\n has_not[key] = dct[key]\n\n return WinnowedResult(has, has_not)", "def values(self):\n return [_ for _ in self._dict.values()]", "def filter(d: dict) -> dict:\n return {k: v for (k, v) in d.items() if k in TransportRobotAgent.serialized_fields}", "def safe_dict(d: dict) -> dict:\n return {k: v for k, v in d.items() if not any(chunk in k for chunk in [\"token\"])}", "def filterKeys(document, keys):\n return {key: document[key] for key in keys}", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def setup_dict(self, keys=None):\n keys = keys or []\n return {key: True for key in keys}", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def omit(self, *keys):\n return _({k: self[k] for k in self._ if k not in keys})", "def dict_values(d):\n return list(d.values())", "def filter_dic_by_key_prefix(dic,key_prefix_list):\n new_dic = {}\n for key in dic:\n retain = True\n for prefix in key_prefix_list:\n if key.startswith(prefix):\n retain = False\n if retain:\n new_dic[key] = dic[key]\n return new_dic", "def drop_empty_iters(obj: dict) -> dict:\n if not isinstance(obj, (dict, list)):\n return obj\n if isinstance(obj, list):\n return [v for v in (drop_empty_iters(member) for member in obj) if v]\n\n return {k1: v1 for k1, v1\n in ((k2, drop_empty_iters(v2)) for k2, v2 in obj.items())\n if v1 not in ({}, [], None)}", "def select_keys(dictionary, keys):\n return dict((k, dictionary[k]) for k in keys\n if k in dictionary)", "def filter_args_dict(self, args):\n return dict((k,v) for (k,v) in viewitems(args) if self.has_arg(k))", "def keep_in_dictionary(self,dictionary,*keys):\r\n remove_keys = [k for k in dictionary if k not in keys]\r\n self.remove_from_dictionary(dictionary,*remove_keys)", "def get_values(self):\n values_dict = self.__class__.objects.values().get(pk=self.pk)\n d = {\n k: v for k, v in zip(values_dict.keys(), values_dict.values())\n if v is not None and v is not u''\n }\n return d", "def Exclude(*keys):\n\n def exclude(row):\n res = dict(row)\n for k in keys:\n if k in res:\n del res[k]\n return res\n\n return \"Exclude\" >> beam.Map(exclude)", "def _remove_keys(results: 
dict, remove: list) -> dict:\n removed = {}\n for key, val in results.items():\n if key not in remove:\n removed[key] = val\n return removed", "def flattenDict(inputDict: dict) -> list:\n return sum([[key] if type(value) is bool else [key, value] for key, value in inputDict.items() if value], list())", "def get_keys(request):\n\n keys=[]\n reports = Report.objects.all().exclude(institute = 'PUBMED')\n for report in reports:\n json_rep = report.report_json\n for el in json_rep.keys():\n if el not in keys:\n keys.append(el)\n json_resp = {'keys':keys}\n return JsonResponse(json_resp)", "def _key_vals(dict_):\n return [(key, val) for key, val in dict_.iteritems()]", "def get_valid_values_map(self, condition=False, remove_special=True):\n pkmap = {}\n for selection in self.selections.normal_values():\n rmap_pkmap = selection.get_valid_values_map(condition)\n for key in rmap_pkmap:\n if key not in pkmap:\n pkmap[key] = set()\n pkmap[key] |= set(rmap_pkmap[key])\n for key in self.get_parkey_map():\n if key not in pkmap:\n pkmap[key] = [] # flag a need for an unconstrained input\n if remove_special:\n specials = {\"ANY\",\"N/A\"}\n for key in pkmap: # remove specials like ANY or N/A\n if pkmap[key]:\n pkmap[key] = pkmap[key] - specials\n for key in pkmap: # convert to sorted lists\n pkmap[key] = sorted(pkmap[key])\n return pkmap", "def uniqueDicts(obj):\n return [json.loads(d) for d in set(json.dumps(r, sort_keys=True) for o in obj)]", "def getall(self, key):\n return self.values.get(key, [])", "def remove_duplicates(input_list):\n return list(dict.fromkeys(input_list))", "def get_all_blacklisted_as_dict(self):\n blacklist = self.get_all_blacklisted_as_list()\n R = dict()\n [ R.setdefault( x[0], [] ).append(x[1]) for x in blacklist ]\n return R", "def processReadings(self, readings):\r\n return {key:value for key, value in readings.items() if not set(key).issubset(self.symbols)}", "def filterKeys(document, use_these_keys):\n return {key: document[key] for key in use_these_keys}", "def dilute_dict(d, num_to_keep=None):\n if num_to_keep is not None:\n if num_to_keep < 0:\n num_to_keep = len(d) + num_to_keep\n keys_to_keep = random.sample(d, num_to_keep)\n d = {k: v for k, v in d.items() if k in keys_to_keep}\n return d", "def filter_content_keys(obj: Dict[Any, Any]) -> Dict[Any, Any]:\n return {k: v for k, v in obj.items() if not k.startswith(\"__\")}", "def _filter_pipeline_parameters(dct):\n return {k: v for k, v in dct.items() if k not in non_pipeline_parameter_names and k != dynamic_param_name}", "def _remove_empty_values(data: T) -> T:\n if not isinstance(data, dict):\n return data\n return {k: _remove_empty_values(v) for k, v in data.items() if v is not None}", "def dict_keys(d):\n return list(d.keys())", "def remove_keys_from_dict(dictionary, keys):\n\n # Copy dictionary\n dictionary_updated = dictionary.copy()\n try:\n [dictionary_updated.pop(key) for key in keys]\n except:\n print(\"Error: No ratio and sampling strategy parameters\")\n return dictionary_updated", "def collect_by_key(pair_iter):\n out = {}\n for (k, v) in pair_iter:\n out[k] = out.get(k, [])\n out[k].append(v)\n return out", "def remove_none_values(dict_):\r\n\r\n res = {}\r\n res.update((key, value) for key, value in dict_.iteritems() \\\r\n if value is not None)\r\n return res", "def removeDups(lst):\n\n return list(dict.fromkeys(lst) )", "def get_missing(dicts):\n for d in dicts:\n for k, v in d.items():\n d[k] = set([1, 2, 3, 4, 5, 6, 7, 8, 9]) - set(v)\n return dicts", "def filter_tags(d, tags):\n\treturn 
dict([(k,filter(lambda x: x in tags, v))\n\t\t\t\tfor k, v in d.iteritems() if v])" ]
[ "0.7157758", "0.6764101", "0.67270654", "0.6699573", "0.6676822", "0.6673541", "0.6655425", "0.66365665", "0.66270673", "0.6611581", "0.6571578", "0.65568775", "0.6545871", "0.6436229", "0.6432588", "0.6381771", "0.63771033", "0.6347461", "0.63027394", "0.62880474", "0.62869287", "0.6242114", "0.6226327", "0.6169943", "0.61549866", "0.61478144", "0.6137546", "0.61231184", "0.6122416", "0.61175287", "0.6106198", "0.60994434", "0.60975945", "0.60921735", "0.60617065", "0.6060648", "0.6043311", "0.6026569", "0.6023774", "0.602097", "0.600434", "0.6003053", "0.60012114", "0.59907943", "0.5985554", "0.59826577", "0.59702283", "0.59662026", "0.59602344", "0.59592605", "0.59547496", "0.5952765", "0.5951984", "0.592669", "0.59252596", "0.5922861", "0.5922844", "0.58976984", "0.5891614", "0.5890638", "0.5859751", "0.5852516", "0.5824477", "0.58237356", "0.5812332", "0.58030665", "0.57986635", "0.57897764", "0.5786436", "0.57800496", "0.5776882", "0.5763655", "0.5759216", "0.5757327", "0.57179576", "0.570322", "0.5702812", "0.5694132", "0.5680907", "0.56808156", "0.56754565", "0.5655771", "0.56522006", "0.5643762", "0.5637934", "0.56329906", "0.5619756", "0.56189203", "0.5607514", "0.5606652", "0.56054884", "0.56036776", "0.55951583", "0.5590192", "0.55840755", "0.5580602", "0.55762684", "0.55669826", "0.55666405", "0.5557442" ]
0.72772044
0
List Box data, merging rows with common independent values
def combined(self): d = self._combined return [c for key, c in d.items()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _merge(self, box_list):\n if isinstance(box_list, self.__class__):\n box_list = [box_list]\n for box in box_list:\n for row in box:\n row[IND] = len(self)\n self.append(row)\n self._combine(row)", "def _combine_omnipage_cell_list(table, inds, row_flag):\n if row_flag:\n row_or_col_list = [table[i, :] for i in inds]\n else:\n row_or_col_list = [table[:, i] for i in inds]\n return [' '.join(_unique_sorted([str(k) for k in j])).strip()\n for j in zip(*row_or_col_list)]", "def collapseRow(self, lst):\n collapsible = False\n length = len(lst)\n base = ecount = 0\n last_merge = None\n for i in range(length):\n if lst[i] == 0:\n ecount += 1\n elif base != i:\n if lst[i] == lst[base] and base != last_merge:\n lst[base] *= 2\n self.score += lst[base]\n lst[i] = 0\n last_merge = base\n collapsible = True\n elif lst[base] == 0:\n lst[base] = lst[i]\n lst[i] = 0\n collapsible = True\n elif i > base+1:\n lst[base+1] = lst[i]\n lst[i] = 0\n collapsible = True\n\n if lst[base+1] != 0:\n base += 1\n if ecount == length:\n collapsible = True\n\n return lst, collapsible", "def compareData(self) : \n logger.info('Comparing data ...')\n showAll = self.ui.showAll_checkBox.isChecked()\n selData = self.getSelectedRowData()\n\n if selData : \n assetName = selData[self.setCols.index('Asset Name')]\n rootLoc, rootExists = self.getRootLoc(assetName)\n \n if rootExists : \n abcAssetHero = str(self.ui.asset_lineEdit.text())\n abcShotHero = str(self.ui.shot_lineEdit.text())\n add = None \n remove = None\n\n if self.ui.compareCurrent_checkBox.isChecked() : \n if abcAssetHero : \n if self.ui.asset_radioButton.isChecked() : \n add, remove = sd_utils.compareLoc(rootLoc, abcAssetHero)\n\n if abcShotHero : \n if self.ui.shot_radioButton.isChecked() : \n add, remove = sd_utils.compareLoc(rootLoc, abcShotHero)\n\n else : \n add, remove = sd_utils.compareAbc(abcShotHero, abcAssetHero)\n\n self.ui.compare_listWidget.clear()\n \n if not showAll : \n if add : \n print 'add', add\n for item in add : \n self.addListWidgetItem(item, color=self.green)\n\n if remove : \n print 'remove', remove\n for item in remove : \n self.addListWidgetItem(item, color=self.red)\n\n if showAll : \n rootLocs = sd_utils.getSceneLocator(rootLoc)\n\n for item in rootLocs : \n color = [0, 0, 0]\n\n if item in remove : \n color = self.red \n\n self.addListWidgetItem(item, color=color)\n\n if add : \n for item in add : \n self.addListWidgetItem(item, color=self.green)\n\n else : \n logger.info('No set found')", "def merge_AllLeft(lsts):\r\n new_lsts = []\r\n for row in lsts:\r\n array1 = add_tiles(row)\r\n new_lsts.append(array1)\r\n lsts = new_lsts\r\n\r\n return lsts", "def misclist_build(self):\n\n self.MiscList.ClearAll()\n\n # Add column headers if necessary.\n if self.MiscList.GetColumnCount() == 0:\n self.MiscList.InsertColumn(0, 'Name', width=76)\n self.MiscList.InsertColumn(1, 'Value', width=67)\n\n misc_values = list(self.patch.engine.misc_data.values())\n for misc_index in range(len(misc_values)):\n misc_value = misc_values[misc_index]\n\n self.MiscList.InsertItem(misc_index, misc_value['name'])\n\n self.misclist_update_row(misc_index)\n\n self.list_autosize(self.MiscList)\n self.MiscList.Select(0, True)", "def removeDuplicatesInList(self, data):\n newDataList = []\n for i in data:\n if newDataList.count(i) == 0:\n newDataList.append(i)\n data.clear()\n data += newDataList", "def unique_rows(self):\n return list(set([coord[0] for coord in self.landscape]))", "def merge_rows(self, rows):\n for row in rows:\n yield 
tuple(self.merge_one_row(row, combine_measurements))", "def update_listed_eqns(self):\n self.clear_rightside()\n self.lst_eqns.clear()\n self.lst_eqns.addItems(list(self.eqn_data[self.cmb_sections.currentText()]))", "def mergeLumis(inputdata, lumimask):\n mergedlumis = LumiList()\n doublelumis = LumiList()\n for report in inputdata:\n doublelumis = mergedlumis & LumiList(runsAndLumis=report)\n mergedlumis = mergedlumis | LumiList(runsAndLumis=report)\n return mergedlumis.getCompactList(), (LumiList(compactList=lumimask) - mergedlumis).getCompactList(), doublelumis.getCompactList()", "def left_merge(self,list_to_merge):\n self.items = list_to_merge + self.items\n return self.items", "def data_list_wdl_merge(data_list1:list, data_list2:list) -> list:\n list_size = len(data_list1)\n merged_data_list = []\n for i in range(list_size):\n merged_data_list.append(pd.concat([data_list1[i],data_list2[i]]))\n return merged_data_list", "def getmergesets (lblob,prct,areaop=min): \n sz = len(lblob)\n bmerged = [False for i in range(sz)]\n for i,blob in enumerate(lblob): blob.ID = i # make sure ID assigned\n lmergeset = [] # set of merged blobs (boxes)\n for i in range(sz):\n blob0 = lblob[i]\n for j in range(sz):\n if i == j: continue\n blob1 = lblob[j]\n # if blob0.band != blob1.band: continue # NB: this was only used when preventing frequency band crossing!! (2/18/21)\n # enough overlap between bboxes? \n if blob0.getintersection(blob1).area() >= prct * areaop(blob0.area(),blob1.area()):\n # merge them\n bmerged[i]=bmerged[j]=True\n found = False\n for k,mergeset in enumerate(lmergeset): # determine if either of these bboxes are in existing mergesets\n if i in mergeset or j in mergeset: # one of the bboxes in an existing mergeset?\n found = True\n if i not in mergeset: mergeset.add(i) # i not already there? add it in\n if j not in mergeset: mergeset.add(j) # j not already there? add it in\n if not found: # did not find either bbox in an existing mergeset? 
then create a new mergeset\n mergeset = set()\n mergeset.add(i)\n mergeset.add(j)\n lmergeset.append(mergeset)\n return lmergeset, bmerged", "def completeMerge(self):\n #--Remove lists that aren't the sum of at least two esps.\n srcMods = self.srcMods\n for levls in (self.levcs,self.levis):\n for listId in levls.keys():\n if len(srcMods[listId]) < 2 or levls[listId].isDeleted:\n self.records.remove(levls[listId])\n del levls[listId]\n del srcMods[listId]\n #--Log\n log = self.log\n for label, levls in (('Creature',self.levcs), ('Item',self.levis)):\n if not len(levls): continue\n log.setHeader(_('Merged %s Lists:') % (label,))\n for listId in sorted(levls.keys(),key=lambda a: a.lower() ):\n log(listId)\n for mod in srcMods[listId]:\n log(' '+mod)", "def _merge_row(self, row1, row2):\n\n duprow = list(row1)\n duprow.extend(list(row2))\n row1.clear()\n overlap_map = {}\n\n for body, overlap in duprow:\n if body not in overlap_map:\n overlap_map[body] = 0\n overlap_map[body] += overlap\n\n for body, overlap in overlap_map.items():\n row1.add((body, overlap))", "def _get_final_boxes(rows: List) -> List:\n final_boxes = []\n center = _get_center(rows)\n col_cnt = _get_shape_cnt(rows)\n for i in rows:\n lis = []\n for _ in range(col_cnt):\n lis.append([])\n for j in i:\n diff = abs(center - (j[0] + j[2] / 4))\n minimum = min(diff)\n indexing = list(diff).index(minimum)\n lis[indexing].append(j)\n final_boxes.append(lis)\n return final_boxes", "def combineData(self, dataLOLA, dataLOLB):\r\n map(lambda x: x[0].append(x[1][1]), zip(dataLOLA, dataLOLB))\r\n return dataLOLA", "def concatenate_data():", "def _findCboxItems(self, SQLquery):\n with sqlite3.connect(self.dbpath) as conn:\n cursor = conn.cursor()\n cursor.execute(SQLquery)\n find = cursor.fetchall()\n find = tools.listOfTuplesToList(find)\n # the inner padding in a combobox doesn't work, to simmulate the\n # padding on the left side, add a space at the begining of each line\n return tools.leftPadItems(find)", "def merge_rows(rows):\n new_rows = [rows[0]]\n for row in rows[1:]:\n if new_rows[-1]['gene'] == row['gene'] and row['vstart'] < new_rows[-1]['vend'] and row['vend'] > new_rows[-1]['vend']:\n new_rows[-1]['vend'] = row['vend']\n elif new_rows[-1]['gene'] != row['gene'] or new_rows[-1]['vend'] <= row['vstart']:\n new_rows.append(row)\n return new_rows", "def mergeGroup(self):\n if len(self) < 2:\n return\n mainItem = self[0]\n for item in self[1:]:\n mainItem.textLines.extend(item.textLines)\n mainItem.height = reduce(lambda x,y: x+y, [item.height for item in\n self])", "def create_widget_tuple_list (labels: list, values: list) -> list:\n _merged = tuple(zip(labels, values))\n return _merged", "def populateListCtrl(self):\n\t\t#self.cols = [self.beginner, self.intermediate, self.expert]\n\t\tself.cols = [self.beginner, self.expert]\n\t\tself.headervals = [\n\t\t[\"%ch1% threshold (Lower / Upper)\", \"\", \"\", 0],\n\t\t[\"%ch2% threshold (Lower / Upper)\", \"\", \"\", 0],\n\t\t[\"# of voxels > threshold (%ch1% / %ch2%)\", \"\", \"\", 0],\n\t\t[\"# of colocalized voxels\", \"\", \"\", 0],\n\t\t[\"% of %ch1% coloc. (voxels / intensity)\", \"\", \"\", 0],\n\t\t[\"% of %ch2% coloc. (voxels / intensity)\", \"\", \"\", 0],\n\t\t[\"M1\", \"\", \"\", 0],\n\t\t[\"M2\", \"\", \"\", 0],\n\t\t[\"P-Value\", \"\", \"\", 0],\n\t\t[\"% of volume colocalized\", \"\", \"\", 1],\n\t\t[\"% of %ch1% coloc. (total intensity)\", \"\", \"\", 1],\n\t\t[\"% of %ch2% coloc. 
(total intensity)\", \"\", \"\", 1],\n\t\t[\"Correlation\", \"\", \"\", 1],\n\t\t[\"Correlation (voxels > threshold)\", \"\", \"\", 1],\n\t\t[\"Correlation (voxels < threshold)\", \"\", \"\", 1],\n\t\t[\"Sum of %ch1% (total / over threshold)\", \"\", \"\", 1],\n\t\t[\"Sum of %ch2% (total / over threshold)\", \"\", \"\", 1],\n\t\t[\"# of non-zero voxels (%ch1% / %ch2%)\", \"\", \"\", 1],\n\t\t[\"Differ. stain of %ch1% to %ch2% (voxels / intensity)\", \"\", \"\", 1],\n\t\t[\"Differ. stain of %ch2% to %ch1% (voxels / intensity)\", \"\", \"\", 1],\n\t\t[\"% of diff. stain of %ch1% (voxels / intensity)\", \"\", \"\", 1],\n\t\t[\"% of diff. stain of %ch2% (voxels / intensity)\", \"\", \"\", 1],\n\t\t[\"R(obs)\", \"\", \"\", 1],\n\t\t[u\"R(rand) (mean \\u00B1 sd)\", \"\", \"\", 1],\n\t\t[\"R(rand) > R(obs)\", \"\", \"\", 1]\n\t\t]\n\t\t\n\t\t#if scripting.TFLag:\n\t\t\t# Remove diff stain & r(obs) from non-tekes version\n\t\t#\tself.headervals = self.headervals[:-7]\n\t\t\t#+ self.headervals[-3:]\n\n\t\tself.InsertColumn(0, \"Quantity\")\n\t\tself.InsertColumn(1, \"Value\")\n\t\t#self.InsertColumn(1,\"\")\n\t\t\n\t\tself.SetColumnWidth(0, 180)\n\t\tself.SetColumnWidth(1, 180)\n\t\tfor n, item in enumerate(self.headervals):\n\t\t\ttxt, a, b, col = item\n\t\t\tself.InsertStringItem(n, txt)\n\t\t\tself.SetItemTextColour(n, self.cols[col])\n\t\t\t#self.SetItemBackgroundColour(n, self.cols[col])", "def _merge_proposal_boxes_into_roidb(self, roidb, box_list):\n assert len(box_list) == len(roidb)\n for i, entry in enumerate(roidb):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n gt_overlaps = np.zeros(\n (num_boxes, entry['gt_overlaps'].shape[1]),\n dtype=entry['gt_overlaps'].dtype\n )\n box_to_gt_ind_map = -np.ones(\n (num_boxes), dtype=entry['box_to_gt_ind_map'].dtype\n )\n\n # Note: unlike in other places, here we intentionally include all gt\n # rois, even ones marked as crowd. 
Boxes that overlap with crowds will\n # be filtered out later (see: _filter_crowd_proposals).\n gt_inds = np.where(entry['gt_classes'] > 0)[0]\n if len(gt_inds) > 0:\n gt_boxes = entry['boxes'][gt_inds, :]\n gt_classes = entry['gt_classes'][gt_inds]\n proposal_to_gt_overlaps = bbox_overlaps(\n boxes.astype(dtype=np.float32, copy=False),\n gt_boxes.astype(dtype=np.float32, copy=False)\n )\n # Gt box that overlaps each input box the most\n # (ties are broken arbitrarily by class order)\n argmaxes = proposal_to_gt_overlaps.argmax(axis=1)\n # Amount of that overlap\n maxes = proposal_to_gt_overlaps.max(axis=1)\n # Those boxes with non-zero overlap with gt boxes\n I = np.where(maxes > 0)[0]\n # Record max overlaps with the class of the appropriate gt box\n gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]\n entry['boxes'] = np.append(\n entry['boxes'],\n boxes.astype(entry['boxes'].dtype, copy=False),\n axis=0\n )\n entry['gt_classes'] = np.append(\n entry['gt_classes'],\n np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)\n )\n entry['seg_areas'] = np.append(\n entry['seg_areas'],\n np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)\n )\n entry['gt_overlaps'] = np.append(\n entry['gt_overlaps'].toarray(), gt_overlaps, axis=0\n )\n entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])\n entry['is_crowd'] = np.append(\n entry['is_crowd'],\n np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)\n )\n entry['box_to_gt_ind_map'] = np.append(\n entry['box_to_gt_ind_map'],\n box_to_gt_ind_map.astype(\n entry['box_to_gt_ind_map'].dtype, copy=False\n )\n )", "def merge(line):\r\n # Create a copy of the input list line\r\n list_copy=[]\r\n #adding the none zero elements of line to list_copy\r\n for dummy_i in range(len(line)):\r\n if line[dummy_i] != 0:\r\n list_copy.append(line[dummy_i])\r\n # adding the appropriate number of zeros to match the length of list_copy and line\r\n for dummy_j in range(len(list_copy),len(line)):\r\n list_copy.append(0)\r\n \r\n # merging the tiles that have the same value\r\n for dummy_k in range(len(list_copy)-1):\r\n # checking for equal values of the adjacent tiles \r\n if list_copy[dummy_k]!=0 and list_copy[dummy_k]==list_copy[dummy_k+1]:\r\n # if equal double the value of the first tile and assign zero to second tile\r\n list_copy[dummy_k]=2*list_copy[dummy_k]\r\n list_copy[dummy_k+1]=0\r\n \r\n #shifting the rest of the values ot the tiles by one place\r\n for dummy_p in range(dummy_k+1,len(list_copy)-1):\r\n list_copy[dummy_p]=list_copy[dummy_p+1]\r\n if (len(line)>3):\r\n list_copy[-2]=list_copy[-1]\r\n list_copy[-1]=0\r\n # returning list_copy which is the answer\r\n return list_copy", "def row_naked_twins(values):\n dual_values = [box for box in values.keys() if len(values[box]) == 2]\n \n for box in dual_values:\n for row_boxes in row_dict[box]:\n if values[row_boxes] == values[box]:\n loc_1 = values[box][0]\n loc_2 = values[box][1]\n \n modified_row = list(row_dict[box])\n modified_row.remove(row_boxes) #we do not want to remove the values from naked twins\n \n for modified in modified_row: #for all the OTHER columns:\n if len(values[modified]) == 1: #we do not want to remove values from solved entries\n modified_row.remove(modified)\n \n for row_boxes_2 in modified_row:\n try:\n values[row_boxes_2].remove(loc_1)\n except:\n pass\n try:\n values[row_boxes_2].remove(loc_2)\n except:\n pass\n \n\n \n return values", "def merge_list(self, new_list):\n self.student_list = 
self.student_list.append(new_list, sort=False, ignore_index=True)\n self.student_list.index = self.student_list.index + 1\n self.student_list.average = self.student_list[['midterm', 'finalterm']].mean(axis=1)\n self.student_list.grade = self.student_list.average.apply(self.calc_grade)", "def singleton_list(data_table):\n\tsingleton_list = []\n\tfor line in data_table:\n\t\tsingleton_list.append(Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n\t\t\n\treturn singleton_list", "def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items", "def merge(self):\n # Find which clusters/rows to merge\n self.segsChanged = True\n tomerge = []\n i = 0\n for cbox in self.cboxes:\n if cbox.checkState() != 0:\n tomerge.append(i)\n i += 1\n print('rows/clusters to merge are:', tomerge)\n if len(tomerge) < 2:\n return\n\n # Generate new class labels\n nclasses = self.nclasses - len(tomerge) + 1\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in tomerge:\n labels.append((c, 0))\n else:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n # print('[old, new] labels')\n labels = dict(labels)\n # print(labels)\n\n keys = [i for i in range(self.nclasses) if i not in tomerge] # the old keys those didn't merge\n # print('old keys left: ', keys)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {0: self.clusters[tomerge[0]]}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before update: ', self.clusters)\n self.clusters = clusters\n print('after update: ', self.clusters)\n\n self.nclasses = nclasses\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n\n # update the cluster combobox\n #self.cmbUpdateSeg.clear()\n #for x in self.clusters:\n #self.cmbUpdateSeg.addItem(self.clusters[x])\n\n # Clean and redraw\n self.clearButtons()\n self.updateButtons()\n self.completeChanged.emit()", "def crosslistings(self):\n return self.primary_listing.listing_set.exclude(id=self.id)", "def merge(pair_list, threshold=need_to_fill):\n pair_list = list(pair_list)\n slope_list = []\n # Calculate slope of the starting coordinate of\n # every other box to the first box in the tuple.\n for pair in pair_list:\n # Get the first boxe's coordinate.\n # Due to the former process, the first box will be\n # the one on the left side.\n if len(pair) == 1:\n continue\n x0, y0, w0, h0 = pair[0]\n\n tmp_list = []\n for i in range(1, len(pair)):\n xi, yi, wi, hi = pair[i]\n # Take copy to exact up or below place into consideration.\n if xi == x0:\n if yi > y0:\n slope = float(\"inf\")\n else:\n slope = -float(\"inf\")\n else:\n slope = (yi - y0) * 1.0 / (xi - x0)\n # tmp list will look like\n # [slope1, slope2, ...]\n tmp_list.append(slope)\n\n # Slope list will look like\n # [[slope1, slope2...], [slope1, slope2...], ...]\n slope_list.append(tmp_list)\n\n # Then we will need to find pairs with same slope.\n # And cluster the boxes.\n # Here we take slope list as a vector and calculate their distance,\n # then use a threshold to get similar ones.\n results = []\n\n while len(slope_list) != 0:\n # Save tuples that should be merged.\n merge_boxes = []\n\n # np array will make euclide distance calculation more convienent.\n vector_r = np.array(slope_list[0])\n merge_boxes.append(pair_list[0])\n\n # Always keep pair list and slopelist corresponding.\n slope_list = slope_list[1:]\n pair_list = pair_list[1:]\n\n\n for vector, pair in zip(slope_list, pair_list):\n\n # While 
cauculating euclide diatance, we should take infinity\n # slope into consideration as numpy can not deal with such cases.\n vector_n = np.array(vector)\n\n inf_exist = False\n for slope in vector:\n if slope == float(\"inf\") or slope == -float(\"inf\"):\n inf_exist = True\n for slope in vector_r:\n if slope == float(\"inf\") or slope == -float(\"inf\"):\n inf_exist = True\n\n if inf_exist:\n # Calcuate distance with some pre procss.\n distance = distance_with_inf(vector_n, vector_r)\n else:\n # Calculate distance directly with numpy function.\n distance = np.linalg.norm(vector_n - vector_r)\n\n\n if distance <= threshold:\n merge_boxes.append(pair)\n slope_list.remove(vector)\n pair_list.remove(pair)\n\n\n # Then we process merge_boxes, merge them together and append it\n # to result.\n length = len(merge_boxes[0])\n merge_boxes = np.array(merge_boxes)\n tmp = []\n\n for i in range(0, length):\n tmp.append(get_bound(merge_boxes[:, i]))\n results.append(tmp)\n\n return results", "def combine_fiber_identlist(identlist_lst):\n identlist1 = list(identlist_lst.values())[0]\n newdescr = [descr for descr in identlist1.dtype.descr]\n # add a new column\n newdescr.insert(0, ('fiber', 'S1'))\n\n newidentlist = []\n for fiber, identlist in sorted(identlist_lst.items()):\n for row in identlist:\n item = list(row)\n item.insert(0, fiber)\n newidentlist.append(tuple(item))\n newidentlist = np.array(newidentlist, dtype=newdescr)\n\n return newidentlist", "def left_join_list_one():\n return[\n ['wrath', 'anger', 'delight'],\n ['fond', 'enamored', 'averse'],\n ['guide', 'usher', 'jam'],\n ['outfit', 'garb', 'follow'],\n ['diligent', 'employed', 'idle'],\n ]", "def concat_all(self):\n return self.merge(1)", "def merge_AllRight(lsts):\r\n new_lsts = []\r\n for row in lsts:\r\n array1 = merge_right(row)\r\n new_lsts.append(array1)\r\n lsts = new_lsts\r\n\r\n return lsts", "def _to_add_and_update(self):\n primitive_list=[]\n group_list=[]\n location_list = []\n #primitives\n for item in self.candidate_config:\n item_groupdef=group(item['name'], item['type'], item['ips'])\n if item['name'] in names(self.live_config):\n live_item=[x for x in self.live_config if x['name']==item['name']][0]\n added_ips=[x for x in item['ips'] if x not in live_item['ips']]\n primitive_list += ip_primitives(item['name'], added_ips)\n if item['type'] != live_item['type']:\n if item['type'] == 'ldirectord':\n primitive_list += ld_primitives(item['name'], item['loadbalancers'])\n if set(item_groupdef[0].split()) != set(group2(live_item)[0].split()):\n group_list += item_groupdef\n if item['loadbalancers'] != live_item['loadbalancers']:\n location_list += lborder(item['name'], item['loadbalancers'])\n else:\n primitive_list += primitives(item['name'], item['type'], item['ips'])\n group_list += item_groupdef\n location_list += lborder(item['name'], item['loadbalancers'])\n return '\\n'.join(primitive_list + group_list + location_list)", "def concat_duplicate_ids(self) -> None:\n # Rebuilt list instead of removing duplicated one at a time at the cost of O(n).\n self.data.clear()\n\n # This implementation takes advantage of the ordering of the duplicated in the __init__ method\n\n has_external_ids = set()\n for ext_id, items in self._external_id_to_item.items():\n if not isinstance(items, list):\n self.data.append(items)\n if items.id is not None:\n has_external_ids.add(items.id)\n continue\n concatenated = DatapointsArray.create_from_arrays(*items)\n self._external_id_to_item[ext_id] = concatenated\n if concatenated.id is not None:\n 
has_external_ids.add(concatenated.id)\n self._id_to_item[concatenated.id] = concatenated\n self.data.append(concatenated)\n\n if not (only_ids := set(self._id_to_item) - has_external_ids):\n return\n\n for id_, items in self._id_to_item.items():\n if id_ not in only_ids:\n continue\n if not isinstance(items, list):\n self.data.append(items)\n continue\n concatenated = DatapointsArray.create_from_arrays(*items)\n self._id_to_item[id_] = concatenated\n self.data.append(concatenated)", "def merge(line):\n lst = [0] * len(line) # we start with a 0-filled list.\n pos = 0 # index position in the new list\n pvl = 0 # we keep the previous value\n for val in line:\n if val: # we only care about the non zero values.\n if not pvl: # this tile is empty\n lst[pos] = val # let's fill with val\n pvl = val\n elif pvl - val: # different non zero values?\n pos += 1\n lst[pos] = val # tiles don't merge\n pvl = val\n else: # same values!\n lst[pos] <<= 1 # it merges!\n pos += 1\n pvl = 0 # next value is 0\n return lst", "def merge(line):\n #Step1. Putting 0 to the end of the list.\n result = []\n for cell in line:\n if cell != 0:\n result.append(cell)\n for cell in range(line.count(0)):\n result.append(0)\n #Step2. Replaced with a tile of twice the value and a zero tile\n for cell in range(len(result)-1):\n if result[cell] == result[cell+1] and len(result) != 1:\n result[cell] += result[cell]\n result[cell+1] = 0\n #Step3. Repeat step1\n final_result = []\n for cell in result:\n if cell != 0:\n final_result.append(cell)\n for cell in range(result.count(0)):\n final_result.append(0)\n return final_result", "def left_join_list_three():\n return[\n ['wrath', 'anger', None],\n ['fond', 'enamored', None],\n ['guide', 'usher', None],\n ['outfit', 'garb', None],\n ['diligent', 'employed', None],\n ]", "def _collect_interacts_items(self):\n self.bi_interacts = set() \n for i, t in enumerate(self.interacts):\n for j in t:\n self.bi_interacts.add((i, j) if i < j else (j, i))", "def left_join_list_two():\n return[\n ['wrath', 'anger', None],\n ['fond', 'enamored', 'averse'],\n ['guide', 'usher', 'jam'],\n ['outfit', 'garb', None],\n ['diligent', 'employed', 'idle'],\n ]", "def get_annotation_values(nested_annotation_column1, nested_annotation_column2):\n flat_list1 = [item for sublist in nested_annotation_column1 for item in sublist]\n flat_list2 = [item for sublist in nested_annotation_column2 for item in sublist]\n uniques = set(flat_list1 + flat_list2)\n return(list(uniques))", "def _merged_yview(self, *args):\n self._curr_scroll_row = round(float(args[1]) * len(self._items))\n self.key_listbox.yview(*args)\n self.value_listbox.yview(*args)", "def _combine(self, results_list):\n pass", "def items_for_result(cl, result, form):\n first = True\n pk = cl.lookup_opts.pk.attname\n for field_name in cl.list_display:\n row_class = ''\n try:\n f, attr, value = lookup_field(field_name, result, cl.model_admin)\n except (AttributeError, ObjectDoesNotExist):\n result_repr = EMPTY_CHANGELIST_VALUE\n else:\n if f is None:\n if field_name == u'action_checkbox':\n continue \n row_class = ' class=\"action-checkbox\"'\n allow_tags = getattr(attr, 'allow_tags', False)\n boolean = getattr(attr, 'boolean', False)\n if boolean:\n allow_tags = True\n result_repr = _boolean_icon(value)\n else:\n result_repr = smart_unicode(value)\n # Strip HTML tags in the resulting text, except if the\n # function has an \"allow_tags\" attribute set to True.\n allow_tags = True\n if not allow_tags:\n result_repr = escape(result_repr)\n else:\n result_repr = 
mark_safe(result_repr)\n else:\n if isinstance(f.rel, models.ManyToOneRel):\n field_val = getattr(result, f.name)\n if field_val is None:\n result_repr = EMPTY_CHANGELIST_VALUE\n else:\n result_repr = escape(field_val)\n else:\n result_repr = display_for_field(value, f)\n if isinstance(f, models.DateField)\\\n or isinstance(f, models.TimeField)\\\n or isinstance(f, models.ForeignKey):\n row_class = ' class=\"nowrap\"'\n if force_unicode(result_repr) == '':\n result_repr = mark_safe('&nbsp;')\n # If list_display_links not defined, add the link tag to the first field\n first = False # don't auto link\n cl.list_display_links = []\n if (first and not cl.list_display_links) or field_name in cl.list_display_links:\n table_tag = {True:'th', False:'td'}[first]\n first = False\n url = cl.url_for_result(result)\n # Convert the pk to something that can be used in Javascript.\n # Problem cases are long ints (23L) and non-ASCII strings.\n if cl.to_field:\n attr = str(cl.to_field)\n else:\n attr = pk\n value = result.serializable_value(attr)\n result_id = repr(force_unicode(value))[1:]\n yield mark_safe(u'<%s%s><a href=\"%s\"%s>%s</a></%s>' % \\\n (table_tag, row_class, url, (cl.is_popup and ' onclick=\"opener.dismissRelatedLookupPopup(window, %s); return false;\"' % result_id or ''), conditional_escape(result_repr), table_tag))\n else:\n # By default the fields come from ModelAdmin.list_editable, but if we pull\n # the fields out of the form instead of list_editable custom admins\n # can provide fields on a per request basis\n if (form and field_name in form.fields and not (\n field_name == cl.model._meta.pk.name and\n form[cl.model._meta.pk.name].is_hidden)):\n bf = form[field_name]\n result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))\n else:\n result_repr = conditional_escape(result_repr)\n if field_name == 'command_html':\n yield mark_safe(u'</tr><tr class=\"custom-code\" style=\"display:none\"><td colspan=\"%s\"><div class=\"code\" >%s</div></td>' % (len(cl.list_display)-1, result_repr))\n elif field_name == 'app':\n yield mark_safe(u'<td%s><h3>%s</h3></td>' % (row_class, result_repr))\n else:\n yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))\n if form and not form[cl.model._meta.pk.name].is_hidden:\n yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))", "def joinPrefixItems(self):\n newList = []\n mergeList = OutputGroup()\n for item in self:\n if mergeList and (item.level != mergeList[0].level or\n not item.prefix or\n not item.equalPrefix(mergeList[0])):\n mergeList.mergeGroup()\n newList.append(mergeList[0])\n mergeList[:] = []\n mergeList.append(item)\n if mergeList:\n mergeList.mergeGroup()\n newList.append(mergeList[0])\n self[:] = newList", "def apply(self):\n next_one = super().apply()\n next_both = set()\n\n for tup in next_one:\n if (tup[1], tup[0]) in next_one:\n next_both.add(tup)\n\n return list(next_both)", "def concat_old_ids(old, new):\n\n ids = [x for x in new.columns if 'ID' in x]\n\n for i, row in new.iterrows():\n info = pd.DataFrame()\n for c in ids:\n if row[c].find(',') != -1:\n for sp in row[c].split(' , '):\n info = info.append(old.loc[(old.Phase == row.Phase) & (old[c] == sp)])\n for col in info.columns:\n if col == 'Page' and len(pd.unique(info[col])) > 1:\n info.loc[:, col] = 'Varies'\n if '#' not in col and 'Description' not in col:\n info.loc[:, col] = ' , '.join([t for t in sorted(pd.unique(info[col])) if t])\n elif '#' in col:\n info.loc[:, col] = info.loc[:,col].sum()\n info = info.drop_duplicates()\n 
info.index = range(len(info))\n if not info.empty:\n for sp in row[c].split(' , '):\n old.loc[(old.Phase == row.Phase) & (old[c] == sp)] = info.loc[0].tolist()\n old = old.drop_duplicates()\n return old", "def intersect(self, other_list):\n assert type(other_list) == type(self)\n \n# if len(self.vals) >= len(other_list.vals):\n# big = self.vals\n# small = other_list.vals\n# else:\n# small = self.vals\n# big = other_list.vals\n# \n# common_list = intSet()\n# for e in big:\n# if e in small:\n# common_list.insert(e)\n# return common_list\n\n common_list = intSet() \n for e in self.vals:\n if other_list.member(e): #if the current e is a member of other_list\n common_list.insert(e)\n return common_list", "def add_list_data(self, data):\n\n # TODO: I'd like to use, say, a QListWidget or something, but controlling the widget\n # height on those was annoying, and I wanted the items to be easily copy+pasteable.\n # In the end I'm just going with a multiline QLabel inside a QScrollArea\n\n if len(data) == 0:\n return None\n\n scroll = QtWidgets.QScrollArea(self)\n scroll.setFrameShadow(QtWidgets.QFrame.Sunken)\n scroll.setFrameShape(QtWidgets.QFrame.Panel)\n w = QtWidgets.QLabel('<tt>{}</tt>'.format('<br/>'.join(data)), self)\n w.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)\n scroll.setWidget(w)\n self.grid.addWidget(scroll, self.cur_row, 1)\n return w", "def extract_filter_list(self, filter_type, elements):\n titleLabel = QLabel(filter_type)\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n self.filterVbox.addLayout(grid)\n\n counter = 0\n for element in elements:\n nextLabel = QLabel(element)\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter/3), counter % 3, alignment=Qt.AlignCenter)\n counter += 1", "def _get_row(self, topology_list):\n new_row = []\n if len(topology_list) == 1:\n topology_list = topology_list[0]\n master = topology_list[0]\n slaves = topology_list[1]\n for slave in slaves:\n if len(slave) == 1:\n new_slave = slave[0]\n else:\n new_slave = slave\n new_row.append((master, new_slave[0]))\n new_row.extend(self._get_row(new_slave))\n return new_row", "def selectedValues(self):\n list_selected = []\n minLevel = float(self.minLevelLineEdit.text() or 0)\n maxLevel = float(self.maxLevelLineEdit.text() or 99)\n for text, checkbox in self.filter_checkBoxs.items():\n if checkbox.isChecked():\n list_selected.append(text)\n print(minLevel, maxLevel, list_selected)\n return minLevel, maxLevel, list_selected", "def mod_lista_oglindit(lista_1, lista_divizori):\n lista_finala = []\n for element in lista_1:\n if verifica_element_divide_lista(element, lista_divizori):\n oglindit = get_oglindit(element)\n lista_finala.append(oglindit)\n else:\n lista_finala.append(element)\n return lista_finala", "def combos_survey_list(survey_list):\n # TODO (jkp 2017-08-29): make better. 
make hashmaps one to the other\n select_args = (Indicator.code, DatalabData.char_grp1.code)\n joined = DatalabData.all_joined(*select_args)\n survey_list_sql = DatalabData.survey_list_to_sql(survey_list)\n filtered = joined.filter(survey_list_sql)\n results = filtered.distinct().all()\n indicator_dict = {}\n char_grp_dict = {}\n for item in results:\n this_indicator = item[0]\n this_char_grp = item[1]\n if this_indicator in indicator_dict:\n indicator_dict[this_indicator].add(this_char_grp)\n else:\n indicator_dict[this_indicator] = set([this_char_grp])\n if this_char_grp in char_grp_dict:\n char_grp_dict[this_char_grp].add(this_indicator)\n else:\n char_grp_dict[this_char_grp] = set([this_indicator])\n new_indicator_dict = {\n k: sorted(list(v)) for k, v in indicator_dict.items()\n }\n new_char_grp_dict = {\n k: sorted(list(v)) for k, v in char_grp_dict.items()\n }\n to_return = {\n 'indicators': new_indicator_dict,\n 'characteristicGroups': new_char_grp_dict\n }\n return to_return", "def test_merge_overlapping_boxes(self):\n # box2 contains box1\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n boxes = [box1, box2]\n merged_boxes, _ = dc.dock.binding_pocket.merge_overlapping_boxes(mapping,\n boxes)\n print(\"merged_boxes\")\n print(merged_boxes)\n assert len(merged_boxes) == 1\n assert merged_boxes[0] == ((1, 3), (1, 3), (1, 3))\n\n # box1 contains box2\n box1 = ((1, 3), (1, 3), (1, 3))\n box2 = ((1, 2), (1, 2), (1, 2))\n mapping = {box1: [1, 2, 3, 4, 5, 6], box2: [1, 2, 3, 4]}\n boxes = [box1, box2]\n merged_boxes, _ = dc.dock.binding_pocket.merge_overlapping_boxes(mapping,\n boxes)\n print(\"merged_boxes\")\n print(merged_boxes)\n assert len(merged_boxes) == 1\n assert merged_boxes[0] == ((1, 3), (1, 3), (1, 3))\n\n # box1 contains box2, box3\n box1 = ((1, 3), (1, 3), (1, 3))\n box2 = ((1, 2), (1, 2), (1, 2))\n box3 = ((1, 2.5), (1, 2.5), (1, 2.5))\n mapping = {\n box1: [1, 2, 3, 4, 5, 6],\n box2: [1, 2, 3, 4],\n box3: [1, 2, 3, 4, 5]\n }\n merged_boxes, _ = dc.dock.binding_pocket.merge_overlapping_boxes(mapping,\n boxes)\n print(\"merged_boxes\")\n print(merged_boxes)\n assert len(merged_boxes) == 1\n assert merged_boxes[0] == ((1, 3), (1, 3), (1, 3))", "def get_intersection_row(rows):\n final_row = []\n if len(rows) == 1:\n return rows[0]\n for j in range(len(rows[0])):\n counter = 0\n for i in range(len(rows) - 1):\n if rows[i][j] == rows[i + 1][j]:\n counter += 1\n if counter == len(rows) - 1:\n final_row.append(rows[0][j])\n else: final_row.append(Empty)\n return final_row", "def setResult(self, result, list):\n list.clear()\n for concept, subcategory, *other in result:\n item = QListWidgetItem()\n item.setData(Qt.UserRole, (concept, subcategory, *other))\n item.setText(\"{}{} {}\".format(concept.name,\n \"\" if subcategory.subcategory == \"\" else \", \",\n subcategory.subcategory))\n list.insertItem(0, item)", "def __get_selected_lists(self):\n selected = []\n for i, l in enumerate(self.lists):\n if self.cb_values[i].get():\n selected.append(l) \n return selected", "def construct_choice_lists(sheet):\n d = {}\n for row in range(1,sheet.nrows):\n c = {}\n for col in range(0,sheet.ncols):\n c[sheet.cell(0,col).value] = sheet.cell(row,col).value\n list_name = c.pop(\"list name\")\n if list_name in d:\n d[list_name].append(c)\n else:\n d[list_name] = [c]\n return d", "def merge(line):\n # create a list of non zero values from input\n input_size = len(line)\n line = [dummy_value for dummy_value in line if 
dummy_value > 0]\n \n # create an output list of same length as input with zero values\n line2 = [0] * input_size\n \n #update the output list with the non zero input list based on certain conditions\n line2[0:len(line)] = line\n \n pos = [dummy_no for dummy_no in range(0, len(line2))]\n \n for jos in pos[0:input_size -1]:\n if line2[jos] == line2[pos[jos+1]]:\n line2[jos] = line2[jos] + line2[pos[jos+1]]\n line2[jos+1] = 0\n \n # repeat last two steps above\n # create an output list of same length as input with zero values\n line2 = [dummy_val for dummy_val in line2 if dummy_val > 0]\n \n # create an output list of same length as input with zero values\n line3 = [0] * input_size\n \n #update the output list with the non zero input list \n line3[0:len(line2)] = line2\n \n return line3", "def concat_rows(self, frame_list: List):\n frame_list = list(frame_list)\n assert len(frame_list) > 0\n if len(frame_list) == 1:\n return frame_list[0]\n pl.concat(frame_list, how=\"vertical\")", "def _summarize_multi_leiden(self):\n # data: row is leiden run, column is cell\n data = self.leiden_result_df.T\n\n # group cell into raw clusters if their hamming distance < 1 - consensus_rate\n cur_cluster_id = 0\n clusters = {}\n while data.shape[1] > 1:\n seed_cell = data.pop(data.columns[0])\n distance = pairwise_distances(X=data.T, Y=seed_cell.values[None, :], metric=\"hamming\").ravel()\n\n judge = distance < (1 - self.consensus_rate)\n this_cluster_cells = [seed_cell.name] + data.columns[judge].to_list()\n for cell in this_cluster_cells:\n clusters[cell] = cur_cluster_id\n data = data.loc[:, ~judge].copy()\n cur_cluster_id += 1\n if data.shape[1] == 1:\n # if there is only one cell remain\n clusters[data.columns[0]] = cur_cluster_id\n clusters = pd.Series(clusters).sort_index()\n\n # renumber clusters based on cluster size\n counts = clusters.value_counts()\n cluster_map = {c: i for i, c in enumerate(counts.index)}\n clusters = clusters.map(cluster_map)\n # renumber small clusters as -1\n counts = clusters.value_counts()\n small_clusters = counts[counts < self.min_cluster_size].index\n clusters[clusters.isin(small_clusters)] = -1\n\n print(f\"{(clusters != -1).sum()} cells assigned to {clusters.unique().size - 1} raw clusters\")\n print(f\"{(clusters == -1).sum()} cells are multi-leiden outliers\")\n self._multi_leiden_clusters = clusters.values\n return", "def mergeWith(self, others):", "def generate_echo_picklist(self):\n concentrations = {\n comp: conc\n for comp, conc, _ in self.quantification_process.concentrations}\n dna_vols = []\n water_vols = []\n wells = []\n dest_wells = []\n sample_names = []\n dna_concs = []\n layout = self.plates[0].layout\n for row in layout:\n for well in row:\n composition = well.composition\n dna_vols.append(composition.dna_volume)\n water_vols.append(composition.water_volume)\n # For the source well we need to take a look at the gdna comp\n gdna_comp = composition.gdna_composition\n wells.append(gdna_comp.container.well_id)\n dest_wells.append(well.well_id)\n # For the sample name we need to check the sample composition\n sample_comp = gdna_comp.sample_composition\n sample_names.append(sample_comp.content)\n # For the DNA concentrations we need to look at\n # the quantification process\n dna_concs.append(concentrations[gdna_comp])\n\n # _format_picklist expects numpy arrays\n dna_vols = np.asarray(dna_vols)\n water_vols = np.asarray(water_vols)\n wells = np.asarray(wells)\n dest_wells = np.asarray(dest_wells)\n sample_names = np.asarray(sample_names)\n dna_concs = 
np.asarray(dna_concs)\n\n return NormalizationProcess._format_picklist(\n dna_vols, water_vols, wells, dest_wells=dest_wells,\n sample_names=sample_names, dna_concs=dna_concs)", "def mergeBboxes(bboxes, bboxes_prev):\n bboxes_merged = deepcopy(bboxes)\n for bbox in bboxes_prev:\n is_exist = False\n for bbox_merged in bboxes_merged:\n if bbox.object_id == bbox_merged.object_id:\n is_exist = True\n bbox_merged.visible = bbox.visible\n break\n if not is_exist:\n bboxes_merged.append(bbox)\n return bboxes_merged", "def merge_list(list1, list2, id_index=0):\r\n\tid_list1 = [row[id_index] for row in list1]\r\n\tduplicates = []\r\n\tfor row in list2:\r\n\t\tif row[id_index] in id_list1:\r\n\t\t\tduplicates.append(row)\r\n\t\telse:\r\n\t\t\tlist1.append(row)\r\n\treturn list1, duplicates", "def flatten(counts):\n single_names = {}\n long_names = {}\n for i in range(len(counts.items())):\n if(len(counts.items()[i][0].split(\" \")) <= 1):\n single_names[str(counts.items()[i][0])] = counts.items()[i][1]\n else:\n long_names[str(counts.items()[i][0])] = counts.items()[i][1]\n \n starter_list = [[[x[0]],x[1]] for x in long_names.items()]\n for i in range(len(single_names.items())):\n matched = False\n for j in range(len(starter_list)):\n if(single_names.items()[i][0] in starter_list[j][0][0].split(\" \")):\n starter_list[j][0].append(single_names.items()[i][0])\n starter_list[j][1] += single_names.items()[i][1]\n matched = True\n break\n \n if(matched == False):\n starter_list.append([[single_names.items()[i][0]], single_names.items()[i][1]]) \n \n \n return starter_list", "def combineFeatures(featurelist):\r\n \r\n cap_list = []\r\n for item in featurelist:\r\n cap_list.append(item.capitalize())\r\n features['Features'] = features[cap_list].apply(lambda x: ', '.join(x), axis=1)\r\n \r\n return features", "def _update_field_list(self, value):\n # Convert sets (e.g. 
draw/display/navmesh groups) to sorted lists so empty sets appear pretty.\n value_text = repr(sorted(value)) if not isinstance(value, list) else repr(value)\n self.value_label.var.set(value_text)\n self._activate_value_widget(self.value_label)", "def add_list_data_row(self, label, data):\n label_widget = self.add_label(label)\n data_widget = self.add_list_data(data)\n self.cur_row += 1\n return data_widget", "def same_rows(rows_list_1, rows_list_2):\n return sorted(rows_list_1) == sorted(rows_list_2)", "def get_rows(self, request, cl):\n rows = []\n # generate changelist attributes (e.g result_list, paginator, result_count)\n cl.get_results(request)\n empty_value_display = cl.model_admin.get_empty_value_display\n for result in cl.result_list:\n row = {}\n for field_name in cl.model_admin.list_display:\n try:\n _, _, value = lookup_field(field_name, result, cl.model_admin)\n # if the value is a Model instance get the string representation\n if value and isinstance(value, Model):\n result_repr = str(value)\n else:\n result_repr = value\n except ObjectDoesNotExist:\n result_repr = empty_value_display\n row[field_name] = result_repr\n rows.append(row)\n return rows", "def filter(self, row):\r\n return list(itertools.compress(row, self.selectors))", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def make_labels_common(self, results):\n labels = []\n keys = self.sort_keys( results )\n for label in keys:\n labels.append( str(label) )\n labels.reverse()\n return labels", "def get_results(self) -> List[str]:\n output = []\n for row in self.row_layout.children():\n if self.possible_values is None:\n text = row.itemAt(0).widget().text()\n else:\n text = row.itemAt(0).widget().currentText()\n\n if text != \"\":\n output.append(text)\n return output", "def merge(line):\n #create empty array equal to line\n outline = [0] * len(line)\n #index in output array\n index = 0\n #flag meaning we can merge\n can_merge = False\n for cell in line:\n if cell > 0:\n if can_merge:\n if cell == outline[index - 1]:\n #current cel equal to previous\n #let's merge them together\n outline[index - 1] += cell\n #and we can't merge more\n #for now\n can_merge = False\n else:\n #not equal just put it to next cell\n outline[index] = cell\n index += 1\n else:\n #we can't merge, so just put\n outline[index] = cell\n index += 1\n #and on next step we can merge\n can_merge = True\n return outline", "def create_all_combination(line):\n data = []\n total_vehi = line[useful_name.NUM_VEHICLE]\n all_vehi = [i for i in range(1,total_vehi +1)]\n for i in all_vehi:\n all_combi = list(combinations(all_vehi,i))\n\n for combi in all_combi:\n row = dict()\n row[useful_name.INPUT_FILE] = line[useful_name.INPUT_FILE]\n row[useful_name.CLUSTERING_METHOD] = line[useful_name.CLUSTERING_METHOD]\n row[useful_name.TIME_CST] = line[useful_name.TIME_CST]\n row[useful_name.CAPACITY_CST] = line[useful_name.CAPACITY_CST]\n row[useful_name.NUM_VEHICLE] = i\n tot_dist,dist_per_vehi = get_dist_for_combi(line,combi)\n row[useful_name.TOTAL_DISTANCE] = tot_dist\n row[useful_name.STOPS_PER_VEHICLE] = dist_per_vehi\n all_stops_id,stops_per_vehi = get_stop_for_combi(line,combi)\n row[useful_name.STOPS_PER_VEHICLE] = stops_per_vehi\n row[useful_name.ALL_STOPS_ID] = all_stops_id\n\n data.append(row)\n\n return data", "def mergeContacts(self):\n self.mergeDialog = MergeDialog(self.db, self.userList.getSelectedItems())\n self.mergeDialog.accepted.connect(self.refreshLists)\n self.mergeDialog.show()", "def rosterRowData(self):", "def 
list_of_labels(self):\n L = np.unique(self.box_label)\n return np.union1d(L, self.geom.list_of_elements_labels())", "def combine(self, patch):\n exclusive = set([\"config\", \"default\", \"mandatory\", \"presence\",\n \"min-elements\", \"max-elements\"])\n kws = set([s.keyword for s in self.slist]) & exclusive\n add = [n for n in patch.slist if n.keyword not in kws]\n self.slist.extend(add)", "def mergeWith(self,newLevl):\n #--Clear\n self.data = None\n self.setChanged()\n #--Merge settings\n self.isDeleted = newLevl.isDeleted\n self.chanceNone = newLevl.chanceNone\n self.calcFromAllLevels = self.calcFromAllLevels or newLevl.calcFromAllLevels\n self.calcForEachItem = self.calcForEachItem or newLevl.calcForEachItem\n #--Merge entries\n entries = self.entries\n oldEntries = set(entries)\n for entry in newLevl.entries:\n if entry not in oldEntries:\n entries.append(entry)\n #--Sort entries by pcLevel\n self.entries.sort(key=lambda a: a[0])", "def get_grid(self):\n self.fullws = []\n for row in self.word_search_grid:\n rowdata = []\n for column in row:\n rowdata += [column.entry.get()]\n self.fullws += [rowdata]\n self.logic.set_grid(self.fullws)", "def get_all_combinations(self):\n stuffs = map(lambda row: row.split(\" \"), self.expanded['GS'] )\n\n combs = self.all_combinations(stuffs)\n\n cls_repeated = self.expanded['CLS'].reset_index(drop=True)[np.array(combs[0])]\n\n A = cls_repeated.reset_index(drop=True)\n B = pd.Series(combs[1])\n\n combo_table = pd.DataFrame([A, B]).T\n\n combo_table.columns = ['CLS','GSCMB']\n\n df = combo_table\n\n df['srt'] = [ ' '.join(map(str, g)) for g in df[\"GSCMB\"] ]\n keep_idx = df[[0,2]].drop_duplicates().index\n gewd = df.iloc[keep_idx,:].reset_index(drop=True)[[\"CLS\",\"GSCMB\"]]\n\n combo_table = gewd\n\n combo_dict = combo_table.groupby('CLS')['GSCMB'].apply(lambda x: x.tolist())\n return combo_dict", "def calc_col_info(self):\n print('\\nCOLUMN VALUE INFORMATION\\n' +\n '----------------------------')\n results.append('\\nCOLUMN VALUE INFORMATION\\n' +\n '----------------------------')\n li = []\n for x in range(0, self.tot_col):\n print(str('\\n' + self.file_list[0][x]) +\n '\\n--------------') # Prints name of column\n\n results.append('\\n' + self.file_list[0][x] +\n '\\n--------------')\n\n for y in range(1, self.tot_rows + 1):\n li.append(self.file_list[y][x])\n li_no_empty = [x for x in li if x != ''] # List with no empty fields\n\n # MAX & MIN VALUE\n print('Maximum value: ' + str(max(li)))\n print('Minimum value: ' + str(min(li_no_empty)))\n results.append('Maximum value: ' + str(max(li)))\n results.append('Minimum value: ' + str(min(li_no_empty)))\n\n # MAX & MIN LENGTH\n li_b = []\n li_c = []\n for a in range(0, len(li)):\n li_b.append(len(li[a]))\n\n print('Maximum length: ' + str(max(li_b)))\n results.append('Maximum length: ' + str(max(li_b)))\n\n for b in range(0, len(li_no_empty)):\n li_c.append(len(li_no_empty[b]))\n\n print('Minimum length: ' + str(min(li_c)))\n results.append('Minimum length: ' + str(min(li_c)))\n\n del li_b[:]\n del li_c[:]\n\n # DISTINCT\n unique_set = set(li) # Counts blanks\n unique_set.discard('') # Does not account for null values\n unique_count = len(unique_set)\n\n print('Distinct values: ' + str(unique_count))\n results.append('Distinct values: ' + str(unique_count))\n\n # DUPLICATES\n value_count = {}\n for c in li:\n value_count[c] = value_count.get(c, 0) + 1\n dups = {key: value for key, value in value_count.items() if value > 1}\n sorted_dups = sorted(dups.items(), key=operator.itemgetter(1))\n\n 
print('\\nDuplicate values\\n' +\n '-------')\n results.append('\\nDuplicate values\\n' +\n '-------')\n\n for item in sorted_dups:\n print('{}'.format(str(item[0])) + ' : ' + str(item[1]))\n results.append('{}'.format(str(item[0])) + ' : ' + str(item[1]))\n\n # for key, num in dups.items():\n # print('{} : {}'.format(key, num))\n # results.append('{} : {}'.format(key, num))\n\n del li[:]", "def Filter(self,val):\n \n #set th elength of the lis to 0\n List = [self.InitialList[i] for i in range(0,len(self.InitialList))]\n FilterValues = [None]\n Grab = [None]\n Headers = []\n \n #create the quick index\n for i in range(len(self.Condensensed)):\n \n Headers.append([self.Condensensed[i][l][0] for l in range(len(self.Condensensed[i]))])\n \n #grab the values...\n for j in range(len(self.Variables)):\n \n FilterValues.append(self.Variables[j].get())\n\n if self.Variables[j].get().split(' ')[0] == 'All':\n \n Grab.append(False)\n \n else:\n \n Grab.append(True)\n \n #intermediate list to compare\n ToCompare = []\n \n for i in range(1,len(Grab)):\n \n if Grab[i]:\n \n #find the index\n l = Headers[i].index(FilterValues[i])\n \n #grab it\n ToCompare.append([self.Condensensed[i][l][m] for m in range(len(self.Condensensed[i][l]))])\n\n\n for i in range(0, len(ToCompare)):\n \n List = list(set(List).intersection(ToCompare[i]))\n\n #update the interface\n self.Gatherer(List,list(self.Input))\n self.BuildTree()", "def combine_list(lines):\n results = []\n for l in lines:\n results.extend(l)\n return list(filter(_remove, results))", "def change_entries(self, grid_sizer, options, border):\n\n if (self.diag_type is None): return grid_sizer # no data selected, don't change anything\n\n # setup the grid of possible values\n header0 = wx.StaticText(self, -1, \"Add/Remove\")\n header1 = wx.StaticText(self, -1, \"Quantity Code\")\n header2 = wx.StaticText(self, -1, \"Name\")\n header3 = wx.StaticText(self, -1, \"LaTeX Formula\")\n grid_sizer.Add(header0, pos=(0,0), flag=options, border=border)\n grid_sizer.Add(header1, pos=(0,1), flag=options, border=border)\n grid_sizer.Add(header2, pos=(0,2), flag=options, border=border)\n grid_sizer.Add(header3, pos=(0,3), flag=options, border=border)\n grid_sizer.Add(wx.StaticLine(self), pos=(1,0), span=(1,4),\n flag=wx.ALL|wx.EXPAND|wx.GROW, border=border)\n\n self.selected_values = [] # keep track of selected quantities\n\n quantities = self.output_quantities.diagnostic_types[self.diag_type]\n\n # choose a good height/width for formulas\n if (self.diag_type in [\"Linear_Forces\", \"Angular_Momentum\", \"Energy_Flux\",\n \"Induction\", \"Inertial_Forces\", \"Lorentz_Forces\",\n \"Poynting_Flux\", \"TurbKE_Budget\"]):\n width = 100\n elif (self.diag_type == \"Thermal_Equation\"):\n width = 150\n else:\n width = 30\n height = 20\n\n row = 2\n iquant = 0\n for Q in quantities:\n but = wx.ToggleButton(self, Q.code, \"Add\") # build button and place it in second column\n but.Bind(wx.EVT_TOGGLEBUTTON, self.OnToggle)\n grid_sizer.Add(but, pos=(row,0), flag=options, border=border)\n\n q_code = wx.StaticText(self, -1, str(Q.code)) # build other column entries\n q_name = wx.StaticText(self, -1, Q.name) # name\n\n formula = self.RenderTeX(Q, size=(width,height))\n\n # place column entries\n grid_sizer.Add(q_code, pos=(row,1), flag=options, border=border)\n grid_sizer.Add(q_name, pos=(row,2), flag=options, border=border)\n grid_sizer.Add(formula, pos=(row,3), flag=options, border=border)\n\n iquant += 1\n\n # add horizontal line every 5 quantities\n if (iquant % 5 == 0):\n 
grid_sizer.Add(wx.StaticLine(self), pos=(row+1,0), span=(1,4),\n flag=wx.ALL|wx.EXPAND|wx.GROW, border=border)\n row_inc = 2\n else:\n row_inc = 1\n\n row += row_inc\n\n grid_sizer.AddGrowableCol(2,1) # make the name/formula columns \"1\" growable, i.e., grows as necessary\n grid_sizer.AddGrowableCol(3,1)\n\n return grid_sizer", "def fixup(self, l):\n\n\n fudges = [ ('A', 'B'),\n ('E', 'F') ]\n\n for x,y in fudges:\n if x in l and y not in l:\n l += y\n if y in l and x not in l:\n l += x\n\n return l", "def dict_un_lists_intersection_test(self, data):\n\n data_info = self.get_data_info(data)\n finished = []\n\n for part in data:\n for union_part in data:\n union = []\n if part != union_part and union_part not in finished:\n for node in data[part]:\n if node in data[union_part]:\n union.append(node)\n finished.append(part)\n\n return data_info", "def data_list_put_together(data_list:list) -> pandas.core.frame.DataFrame:\n list_size = len(data_list)\n data = data_list[0]\n for i in range(1,list_size):\n data = data.append(data_list[i])\n return data", "def groupRows(rows, column):\n filteredRows = filterRows(lambda row: row[column] != '', rows)\n if not filteredRows:\n return []\n groups = [[]]\n index = 0\n lastData = filteredRows[0][column]\n for row in filteredRows:\n if lastData != row[column]:\n index += 1\n lastData = row[column]\n groups.append([row])\n else:\n groups[index].append(row)\n return [group for group in groups]", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def merge(line):\n new_line = [0] * len(line)\n merged = [False] * len(line)\n pos = 0\n for item in line:\n if not item == 0:\n if new_line[pos - 1] == item and merged[pos - 1] == False:\n new_line[pos - 1] = item * 2\n merged[pos - 1] = True\n else:\n new_line[pos] = item\n pos += 1\n return new_line", "def _generate_rows(self):\n logger.debug(\"Generating pre-genealogical coherence data for %s\", self.w1)\n if not self.rows:\n for w2 in self.all_mss:\n if self.w1 == w2:\n continue\n self._add_row(w2)\n\n self._sort()\n logger.debug(\"Generated pre-genealogical coherence data for %s\", self.w1)", "def merge_ranges():" ]
[ "0.6294544", "0.5439905", "0.5388952", "0.5262811", "0.51284385", "0.50952107", "0.5083202", "0.50720966", "0.5040125", "0.49990922", "0.49892396", "0.49385926", "0.49356857", "0.49298894", "0.48937234", "0.4881302", "0.48691523", "0.4845252", "0.4821734", "0.48183888", "0.48109752", "0.47842267", "0.47526947", "0.47522303", "0.475077", "0.47426683", "0.47395074", "0.47353", "0.4731736", "0.4731734", "0.47242203", "0.4716281", "0.47141817", "0.4710718", "0.4703139", "0.46981817", "0.46759593", "0.4660451", "0.46588606", "0.46463653", "0.4645035", "0.46441513", "0.46318173", "0.46253872", "0.4624294", "0.46210453", "0.46115026", "0.4609666", "0.46036294", "0.459974", "0.459943", "0.45951593", "0.45886987", "0.4580118", "0.4575511", "0.45645994", "0.45530844", "0.45525575", "0.45496058", "0.45484838", "0.45428622", "0.4533245", "0.45307332", "0.45165852", "0.4505198", "0.4504928", "0.45034957", "0.4501868", "0.4499992", "0.4494472", "0.44893405", "0.4478602", "0.4477268", "0.44732308", "0.44721526", "0.44667742", "0.44599435", "0.44566825", "0.4456579", "0.44564077", "0.4455204", "0.44550866", "0.44517076", "0.444722", "0.44462985", "0.44443506", "0.4440485", "0.44339493", "0.44326037", "0.44278598", "0.44271177", "0.44258842", "0.4423202", "0.44185746", "0.44133943", "0.44126818", "0.44107234", "0.44054705", "0.4399563", "0.4389709", "0.43892515" ]
0.0
-1
Perform a merge operation
def _merge(self, box_list):
    if isinstance(box_list, self.__class__):
        box_list = [box_list]

    for box in box_list:
        for row in box:
            row[IND] = len(self)
            self.append(row)
            self._combine(row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(): #Status: WIP\r\n pass", "def _merge(self):\n raise NotImplementedError", "def on_merge(self, to_be_merged, merge_result, context):\n pass", "def mergeWith(self, others):", "def merge(*args):\n from ..operators.observable.merge import merge_\n return merge_(*args)", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "def merge(self, obj):\n pass", "def merge(self, ref, *args):\n return self.cmd('merge', ref, *args)", "def merge():\n click.echo(\"Not implemented yet. In the future, this command will be used for merging models.\")\n sys.exit(-2)", "def merge(mergeFiles,mergeDb,createDB,dbase,v,dfile):\n\tglobal verbose\n\n\tverbose = v\n\tif len(mergeFiles) > 0:\n\t\tfor f in mergeFiles:\n\t\t\tprint \"Merge => \"+ f\n\t\t\ttry:\n\t\t\t\tfl = open(f,'r')\n\t\t\t\tProcessEntryFile(fl)\n\t\t\t\tfl.close()\n\t\t\t\tif verbose >= 1:\n\t\t\t\t\tprint reference\n\t\t\texcept IOError:\n\t\t\t\tprint 'File '+f +' cannot be open'\n\n\tif len(mergeDb) > 0:\n\t\tfor f in mergeDb:\n\t\t\tprint \"Merge => \"+ f\n\t\t\tProcessEntryBase(f)\n\t\t\tif verbose >= 1:\n\t\t\t\tprint reference\n\t\n\tif dfile != '':\n\t\ttry:\n\t\t\tif os.path.exists(dfile):\n\t\t\t\tos.remove(dfile)\n\t\t\tfref = open(dfile,'w')\n\t\t\tput_in_file('',fref,reference)\n\t\t\tfref.close()\n\t\t\tif os.path.exists(afile):\n\t\t\t\tos.remove(afile)\n\t\t\tfref = open(afile,'w')\n\t\t\tput_in_afile('',fref,reference)\n\t\t\tfref.close()\n\t\texcept IOError:\n\t\t\tprint 'Cannot open '+dfile+' file'\n\n\tif dbase != '':\n\t\tput_in_db(dbase,reference,createDB)", "def test_merge_two_two():\n run_merge([1, 3], [2, 4], [1, 2, 3, 4])", "def merge(cls, analyses):\r\n raise NotImplementedError()", "def test_merge_two_two_same():\n run_merge([1, 3], [1, 3], [1, 1, 3, 3])", "def merge(self, op):\n highest_mergable = 0\n (head_src, bytestream_src) = self.deconstruct_tail()\n (bytestream_dst, tail_dst) = op.deconstruct_head()\n for ii in range(min(len(bytestream_src), len(bytestream_dst))):\n mergable = True\n for jj in range(ii + 1):\n if not bytestream_src[-ii - 1 + jj].mergable(bytestream_dst[jj]):\n mergable = False\n break\n if mergable:\n highest_mergable = ii + 1\n if 0 >= highest_mergable:\n return False\n if is_verbose():\n print(\"Merging headers %s and %s at %i bytes.\" % (self.__name, op.__name, highest_mergable))\n for ii in range(highest_mergable):\n bytestream_src[-highest_mergable + ii].merge(bytestream_dst[ii])\n bytestream_dst[0:highest_mergable] = []\n self.reconstruct(head_src + bytestream_src)\n op.reconstruct(bytestream_dst + tail_dst)\n return True", "def merge(a,b):\n c = a.copy()\n c.update(b)\n return c", "def test_merge_merges_two_pairs():\n L = [1, 3, 5]\n R = [2, 4, 6]\n assert merge(L, R) == [1, 2, 3, 4, 5, 6]", "def merge(*args):\n return _libsbml.Unit_merge(*args)", "def test_merge_one():\n run_merge([2], [1], [1, 2])", "def test_merge_repl(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n 
self.assertEqual(ars.merge(ars2, 'LastLine', 'signature'), '')", "def do_merge(self, line):\n self.review.merge()", "def MergeLogic(self) -> str:", "def test_merge_one_two():\n L = [1, 3, 2]\n run_merge([1, 3], [2], [1, 2, 3])", "def merge(self, other_btree):\n pass", "def test_merge_sum(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastUpdate', 'parsed'), 1262637)", "def merge(session_id, context, entities, msg):\n pass", "def merge_docs(self):", "def merge(src):\n ok, out, err = common.git_call('merge {0}'.format(src))\n return _parse_merge_output(ok, out, err)", "def hxlmerge():\n run_script(hxlmerge_main)", "def merge(self, first, second):\n return second if self.failed(first) else first", "def merge(self,best1,best2):\n\t\treturn self.cu_for_merge(best1,best2,False)", "def test_merge(self):\n A = [1,4,6,7,8,2,3,4,5,7]\n p = 0\n q = 5\n r = 10\n A_merged = mst.merge(A, p, q, r)\n expected = [1, 2, 3, 4, 4, 5, 6, 7, 7, 8] \n self.assertEqual(A, A_merged)", "def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select 
path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')", "def test_merge_empty():\n run_merge([], [], [])", "def _merge(x, y):\n for key in x:\n if key in y:\n x[key] = _merge(x[key], y[key])\n y[key] = None\n for key in y:\n if y[key] is not None:\n x[key] = y[key]\n return x", "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def merge_data(self, merge_source, keys, tags, replace=False, overwrite=False, queries=[]):\n import hxl.filters\n return hxl.filters.MergeDataFilter(self, merge_source, keys, tags, replace, overwrite, queries=queries)", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def svn_fs_merge(*args):\r\n return _fs.svn_fs_merge(*args)", "def test_merge_aggregate_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def merge(self, session, source_state, source_dict, dest_state,\n dest_dict, load, _recursive):\n\n pass", "def hallucinate_merge(self, other):\n res = CompleteVec(None,None,self.max_num_samples)\n res.needs_update = True\n return res", "def merge(self, db2):\n delta_db = Database(db2)\n\n # Find common headers between the master and delta databases\n common_headers = [x for x in self.headers if x in delta_db.headers]\n\n # Any new headers found in the delta are added to the master\n self.headers.extend(\n [x for x in delta_db.headers if x not in self.headers])\n\n if len(common_headers) < 1:\n print(\"No shared headers were found. 
These files cannot be merged.\")\n else:\n key = ''\n # Skip picker prompt if there is only one common header\n if len(common_headers) == 1:\n key = common_headers[0]\n else:\n key = self.headerpicker(common_headers)\n\n # Create a temp list for new records to be added to\n records_temp = list(self.records)\n\n # Iterate over new records and attempt to match to existing record\n for each in delta_db.records:\n record = self.fetch_record(key, each, records_temp)\n if record:\n record.attributes.update(each.attributes)\n\n self.records = records_temp\n print(\"Merge successful!\\n\")", "def self_merge(self, source_id, destination_id):\n self.vectors[destination_id].merge(self.vectors[source_id])", "def merge(self, skel):\n return Skeleton.simple_merge((self, skel)).consolidate()", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def test_merge_on_unbalanced_lists():\n L = [2, 3, 4, 8]\n R = [1, 9, 10, 13]\n assert merge(L, R) == [1, 2, 3, 4, 8, 9, 10, 13]", "def merge(self, other):\n\n for n in other.cfg_nodes:\n self.insert_cfgnode(n)\n\n for ins_addr, outs in other.out_branches.items():\n if ins_addr in self.out_branches:\n for stmt_idx, item in outs.items():\n if stmt_idx in self.out_branches[ins_addr]:\n self.out_branches[ins_addr][stmt_idx].merge(item)\n else:\n self.out_branches[ins_addr][stmt_idx] = item\n\n else:\n item = next(iter(outs.values()))\n self.out_branches[ins_addr][item.stmt_idx] = item", "def action_merge(branch_dir, branch_props):\n # Check branch directory is ready for being modified\n check_dir_clean(branch_dir)\n\n source_revs, phantom_revs, reflected_revs, initialized_revs = \\\n analyze_source_revs(branch_dir, opts[\"source-url\"],\n find_reflected=\n should_find_reflected(branch_dir))\n\n if opts[\"revision\"]:\n revs = RevisionSet(opts[\"revision\"])\n else:\n revs = source_revs\n\n blocked_revs = get_blocked_revs(branch_dir, opts[\"source-pathid\"])\n merged_revs = opts[\"merged-revs\"]\n\n # Show what we're doing\n if opts[\"verbose\"]: # just to avoid useless calculations\n if merged_revs & revs:\n report('\"%s\" already contains revisions %s' % (branch_dir,\n merged_revs & revs))\n if phantom_revs:\n report('memorizing phantom revision(s): %s' % phantom_revs)\n if reflected_revs:\n report('memorizing reflected revision(s): %s' % reflected_revs)\n if blocked_revs & revs:\n report('skipping blocked revisions(s): %s' % (blocked_revs & revs))\n if initialized_revs:\n report('skipping initialized revision(s): %s' % initialized_revs)\n\n # Compute final merge set.\n revs = revs - merged_revs - blocked_revs - reflected_revs - \\\n phantom_revs - initialized_revs\n if not revs:\n report('no revisions to merge, exiting')\n return\n\n # When manually marking revisions as merged, we only update the\n # integration meta data, and don't perform an actual merge.\n record_only = opts[\"record-only\"]\n\n if record_only:\n report('recording merge of revision(s) %s from \"%s\"' %\n (revs, opts[\"source-url\"]))\n else:\n report('merging in revision(s) %s from \"%s\"' %\n (revs, opts[\"source-url\"]))\n\n # Do the merge(s). 
Note: the starting revision number to 'svn merge'\n # is NOT inclusive so we have to subtract one from start.\n # We try to keep the number of merge operations as low as possible,\n # because it is faster and reduces the number of conflicts.\n old_block_props = get_block_props(branch_dir)\n merge_metadata = logs[opts[\"source-url\"]].merge_metadata()\n block_metadata = logs[opts[\"source-url\"]].block_metadata()\n for start,end in minimal_merge_intervals(revs, phantom_revs):\n if not record_only:\n # Preset merge/blocked properties to the source value at\n # the start rev to avoid spurious property conflicts\n set_merge_props(branch_dir, merge_metadata.get(start - 1))\n set_block_props(branch_dir, block_metadata.get(start - 1))\n # Do the merge\n svn_command(\"merge --force -r %d:%d %s %s\" % \\\n (start - 1, end, opts[\"source-url\"], branch_dir))\n # TODO: to support graph merging, add logic to merge the property\n # meta-data manually\n\n # Update the set of merged revisions.\n merged_revs = merged_revs | revs | reflected_revs | phantom_revs | initialized_revs\n branch_props[opts[\"source-pathid\"]] = str(merged_revs)\n set_merge_props(branch_dir, branch_props)\n # Reset the blocked revs\n set_block_props(branch_dir, old_block_props)\n\n # Write out commit message if desired\n if opts[\"commit-file\"]:\n f = open(opts[\"commit-file\"], \"w\")\n if record_only:\n f.write('Recorded merge of revisions %s via %s from \\n' % \\\n (revs, NAME))\n else:\n f.write('Merged revisions %s via %s from \\n' % \\\n (revs, NAME))\n f.write('%s\\n' % opts[\"source-url\"])\n if opts[\"commit-verbose\"]:\n f.write(\"\\n\")\n f.write(construct_merged_log_message(opts[\"source-url\"], revs))\n\n f.close()\n report('wrote commit message to \"%s\"' % opts[\"commit-file\"])", "def _merge():\n df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n 'value': [1, 2, 3, 5]})\n df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n 'value': [5, 6, 7, 8]})\n print(df1)\n print(df2)\n print(df1.merge(df2, left_on='lkey', right_on='rkey'))\n print(df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=('_left', '_right')))", "def merge_results(self, other_processor):\n if not isinstance(other_processor, self.__class__):\n raise ValueError(f\"Can only extend with another \"\n f\"{self.__class__.__name__} instance.\")\n\n # Where there is overlap, there _should_ be agreement.\n self._evidence_counts.update(other_processor._evidence_counts)\n self._source_counts.update(other_processor._source_counts)\n self._belief_scores.update(other_processor._belief_scores)\n\n # Merge the statement JSONs.\n for k, sj in other_processor.__statement_jsons.items():\n if k not in self.__statement_jsons:\n self.__statement_jsons[k] = sj # This should be most of them\n else:\n # This should only happen rarely.\n for evj in sj['evidence']:\n self.__statement_jsons[k]['evidence'].append(evj)\n\n # Recompile the statements\n self._compile_results()\n return", "def merge_one_by_one_concise(self, revisions, commit_mergeinfo=False):\n print 'Merging one by one, concise mode'\n record_only_revisions = self.load_record_only_revisions()\n if record_only_revisions:\n print 'Found %d revisions to record-only from previous run: %s' % (\n len(record_only_revisions), revisions_as_string(record_only_revisions))\n record_only_revisions = record_only_revisions.intersection(set(revisions))\n merged_paths = set(self.target)\n revisions_to_merge = revisions[:]\n mergeinfo_revisions = set()\n while revisions_to_merge:\n print '=====> Merging: ' + 
revisions_as_string(revisions_to_merge)\n merged = []\n mergeinfo_revisions = set()\n for revision in revisions_to_merge:\n if self.is_no_merge_revision(revision, record_only_revisions):\n self.merge_record_only([revision])\n else:\n self.svn_merge(revision)\n self.resolve_conflicts(revision)\n merged_paths = self.revert_spurious_merges(revision, merged_paths)\n status = self.svn_status()\n if status.has_conflict:\n raise Conflict(\n revision=revision,\n mergeinfos=mergeinfo_revisions.union(record_only_revisions),\n source=self.source,\n target=self.target\n )\n if status.has_non_props_changes():\n merged = mergeinfo_revisions.copy()\n merged.add(revision)\n commit_log = self.commit_log(revision, mergeinfo_revisions)\n print commit_log\n self.commit(['-m', commit_log])\n if not self.svn.return_code:\n mergeinfo_revisions = set()\n break\n mergeinfo_revisions.add(revision)\n if mergeinfo_revisions == set(revisions_to_merge):\n if commit_mergeinfo:\n merged = mergeinfo_revisions.copy()\n commit_log = self.commit_log(mergeinfo_revisions=mergeinfo_revisions)\n print commit_log\n self.commit(['-m', commit_log])\n if not self.svn.return_code:\n mergeinfo_revisions = set()\n break\n else:\n print '=====> Only empty svn:mergeinfo to merge, skipping: %s' % ','.join([\n str(r) for r in revisions_to_merge])\n self.save_record_only_revisions(\n mergeinfo_revisions.union(record_only_revisions))\n return None\n revisions_to_merge = [r for r in revisions_to_merge if r not in merged]\n # Whole pass completed, nothing left pending to merge\n self.save_record_only_revisions(mergeinfo_revisions)\n return None", "def canBeMergedWith(self, other):", "def vmerge(self, dataset, on=None, left_on=None, right_on=None,\n row_id_name=None, left_id=None, right_id=None, row_ids=None,\n overwrite_text=False, from_set=None, uniquify_key=None,\n reset_index=True, inplace=True, verbose=True):\n datasets = [(self._meta, self._data)]\n merge_ds = [(ds._meta, ds._data) for ds in dataset]\n datasets.extend(merge_ds)\n merged_meta, merged_data = _vmerge(\n None, None, datasets, on=on, left_on=left_on,\n right_on=right_on, row_id_name=row_id_name, left_id=left_id,\n right_id=right_id, row_ids=row_ids, overwrite_text=overwrite_text,\n from_set=from_set, reset_index=reset_index, verbose=verbose)\n if inplace:\n self._data = merged_data\n self._meta = merged_meta\n if uniquify_key:\n self._make_unique_key(uniquify_key, row_id_name)\n return None\n else:\n new_dataset = self.clone()\n new_dataset._data = merged_data\n new_dataset._meta = merged_meta\n if uniquify_key:\n new_dataset._make_unique_key(uniquify_key, row_id_name)\n return new_dataset", "def execute(self, parameters, messages):\r\n arcpy.AddMessage(\"\\nPerforming overall merge...\")\r\n logging.info(\"Starting mergeAreas.py script...\\n\")\r\n # Define variables from parameters\r\n overlapWorkspace = parameters[0].valueAsText\r\n gdbWorkspace = parameters[1].valueAsText\r\n featWorkspace = parameters[2].valueAsText\r\n\r\n # Determine list of total overlap, no overlap and to merge feature classes in overlap feature dataset workspace to process.\r\n arcpy.env.workspace = overlapWorkspace\r\n mergeList = arcpy.ListFeatureClasses(\"*_toMerge\")\r\n totalOverlapList = arcpy.ListFeatureClasses(\"*_TotalOverlap\")\r\n noOverlapList = arcpy.ListFeatureClasses(\"*_noOverlap\")\r\n if len(mergeList) > 0:\r\n arcpy.AddMessage(\"Workspace contains the following \" + str(len(mergeList)) + \" feature classes to merge: \" + str(mergeList))\r\n\r\n # Organize toMerge feature classes by 
date\r\n mergeDictbyDate = {}\r\n for fc in mergeList:\r\n fcPath = os.path.join(overlapWorkspace, fc)\r\n fcDate = fc.split(\"_\")[1]\r\n mergeDictbyDate[fcDate] = [fcPath]\r\n\r\n # Append no overlap feature classes toMerge feature classes by date\r\n for noOverlapFc in noOverlapList:\r\n noOverlapPath = os.path.join(overlapWorkspace, noOverlapFc)\r\n noOverlapDate = noOverlapFc.split(\"_\")[1]\r\n mergeDictbyDate[noOverlapDate].append(noOverlapPath)\r\n\r\n # Organize dark targets feature classes by date\r\n arcpy.env.workspace = featWorkspace\r\n fcList = arcpy.ListFeatureClasses()\r\n fcDictByDate = {}\r\n for fc in fcList:\r\n fcPath = os.path.join(featWorkspace, fc)\r\n fcSplit = fc.split(\"_\")\r\n if fcSplit[1] in fcDictByDate:\r\n fcDictByDate[fcSplit[1]].append(fcPath)\r\n else:\r\n fcDictByDate[fcSplit[1]] = [fcPath]\r\n\r\n # Iterate through dark targets acquisition dates and check for acquisition dates with more than a single feature class (for merging)\r\n for key in fcDictByDate:\r\n if len(fcDictByDate[key]) > 1:\r\n\r\n # Iterate through feature classes within acquisition date\r\n for fc in fcDictByDate[key]:\r\n fcSplit = fc.split(\"_\")\r\n\r\n # Check for and add acquisition date toMerge feature classes if not already present\r\n if fcSplit[len(fcSplit)-2] not in mergeDictbyDate:\r\n mergeDictbyDate[fcSplit[len(fcSplit)-2]] = [fc]\r\n\r\n # Check for and add feature class toMerge feature classes if not already present within acquisition date\r\n else:\r\n fcValue = fc.split(\"\\\\\")[len(fc.split(\"\\\\\"))-1] + \"_noOverlap\"\r\n fcValuePath = os.path.join(overlapWorkspace, fcValue)\r\n if fcValuePath not in mergeDictbyDate[key]:\r\n mergeDictbyDate[key].append(fc)\r\n\r\n # Iterate through dark targets acquisition dates to compile lists of feature classes to merge\r\n for key in mergeDictbyDate:\r\n arcpy.AddMessage(\"\\nMerging feature classes in \" + key + \"...\")\r\n logging.info(\"Processing merges for acquisition date '%s'\", key)\r\n mergeList = []\r\n\r\n # Iterate through feature classes within acquisition date and append them to merge list\r\n for item in mergeDictbyDate[key]:\r\n mergeList.append(item)\r\n\r\n # Merge feature classes in merge list into single feature class for the acquisition date\r\n outputDissolveString = \"RS2_\" + key + \"_toDissolve\"\r\n outputDissolve = os.path.join(overlapWorkspace, outputDissolveString)\r\n arcpy.Merge_management(mergeList, outputDissolve)\r\n logging.info(\"Merge: '%s' created from merging the following feature classes: '%s'\", outputDissolve, str(mergeList))\r\n\r\n # Dissolve attribute duplicates and rename fields\r\n arcpy.AddMessage(\"Dissolving...\")\r\n dissolveLyr = \"dissolveLyr\"\r\n outputMergeString = \"RS2_\" + key + \"_merged\"\r\n outputMerge = os.path.join(gdbWorkspace, outputMergeString)\r\n dissolveFields = [\"Pid\", \"RsatID\"]\r\n fieldList = arcpy.ListFields(outputDissolve)\r\n statsFields = []\r\n for field in fieldList:\r\n if \"OBJECTID\" in field.name or \"FID\" in field.name or \"Shape\" in field.name or field.name in dissolveFields or field.name == \"ID\":\r\n continue\r\n statsField = [field.name,\"FIRST\"]\r\n statsFields.append(statsField)\r\n arcpy.MakeFeatureLayer_management(outputDissolve, dissolveLyr)\r\n logging.info(\"Make Feature Layer: '%s' layer created from '%s' feature class\", dissolveLyr, outputDissolve)\r\n arcpy.Dissolve_management(dissolveLyr, outputMerge, dissolveFields, statsFields)\r\n logging.info(\"Dissolve: '%s' feature class created from '%s' layer dissolve\", 
outputMerge, dissolveLyr)\r\n fieldList = arcpy.ListFields(outputMerge)\r\n for field in fieldList:\r\n if field.name.startswith(\"FIRST_\"):\r\n newName = field.name[6:]\r\n arcpy.AlterField_management(outputMerge, field.name, newName)\r\n\r\n # Update targetID with combined target ID for overlapping features\r\n arcpy.AddMessage(\"Updating targetID...\")\r\n finalOutputString = \"RS2_\" + key\r\n overlapBool = False\r\n\r\n # Iterate through total overlap feature classes\r\n for fc in totalOverlapList:\r\n\r\n # Check for merged acquisition date feature class containing overlapping features (by finding equivalent total overlap feature class)\r\n if finalOutputString == fc.strip(\"_TotalOverlap\"):\r\n overlapBool = True\r\n\r\n # Perform spatial join to access targetID field from total overlap feature class\r\n totalOverlapFc = os.path.join(overlapWorkspace, fc)\r\n finalOutput = os.path.join(gdbWorkspace, finalOutputString)\r\n fieldmappings = arcpy.FieldMappings()\r\n fieldmappings.addTable(outputMerge)\r\n fldmap_TARGETID = arcpy.FieldMap()\r\n fldmap_TARGETID.addInputField(totalOverlapFc, \"targetID\")\r\n fld_TARGETID = fldmap_TARGETID.outputField\r\n fld_TARGETID.name = \"targetID_1\"\r\n fldmap_TARGETID.outputField = fld_TARGETID\r\n fieldmappings.addFieldMap(fldmap_TARGETID)\r\n arcpy.SpatialJoin_analysis(outputMerge, totalOverlapFc, finalOutput, \"#\", \"#\", fieldmappings)\r\n logging.info(\"Spatial Join: '%s' feature class created by joining '%s' with '%s'\", finalOutput, outputMerge, totalOverlapFc)\r\n\r\n # Update targetID with combined targetID determined from total overlap feature class\r\n expression = \"copyTargetID(!targetID!, !targetID_1!)\"\r\n codeblock = \"\"\"def copyTargetID(targetID, comb_targetID):\r\n if comb_targetID is None:\r\n return targetID\r\n else:\r\n return comb_targetID\"\"\"\r\n arcpy.CalculateField_management(finalOutput, \"targetID\", expression, \"PYTHON_9.3\", codeblock)\r\n logging.info(\"Calculate Field: 'targetID' field value calculated for '%s' feature class\", finalOutput)\r\n\r\n # Delete extraneous fields\r\n arcpy.DeleteField_management(finalOutput, \"targetID_1\")\r\n arcpy.DeleteField_management(finalOutput, \"Join_Count\")\r\n arcpy.DeleteField_management(finalOutput, \"TARGET_FID\")\r\n\r\n # Rename merged acquisition date feature class to appropriate name if it does not contain overlapping targets\r\n if overlapBool is False:\r\n arcpy.Rename_management(outputMerge, finalOutputString)\r\n logging.info(\"Rename: '%s' feature class renamed to '%s'\", outputMerge, finalOutputString)\r\n\r\n # Delete unneeded process outputs (dissolve and merge outputs)\r\n arcpy.Delete_management(outputDissolve)\r\n logging.info(\"Delete: '%s' feature class deleted\", outputDissolve)\r\n if arcpy.Exists(outputMerge):\r\n arcpy.Delete_management(outputMerge)\r\n logging.info(\"Delete: '%s' feature class deleted\", outputMerge)\r\n\r\n logging.info(\"Processing for merges for acquisition date '%s' complete\\n\", key)\r\n\r\n # Iterate through dark targets acquisition dates to export single feature classes\r\n arcpy.AddMessage(\"\\nExporting single feature classes...\")\r\n logging.info(\"Processing single feature classes to export\")\r\n for key in fcDictByDate:\r\n if len(fcDictByDate[key]) == 1:\r\n for fc in fcList:\r\n fcSplit = fc.split(\"_\")\r\n if fcSplit[1] in mergeDictbyDate:\r\n continue\r\n else:\r\n outputFeatureName = \"RS2_\" + fcSplit[1]\r\n arcpy.FeatureClassToFeatureClass_conversion(fc, gdbWorkspace, outputFeatureName, \"#\", \"#\", 
)\r\n logging.info(\"Feature Class to Feature Class: '%s' feature class converted to '%s'\", fc, outputFeatureName)\r\n outputFeatPath = os.path.join(gdbWorkspace, outputFeatureName)\r\n arcpy.DeleteField_management(outputFeatPath, \"FID\")\r\n logging.info(\"Processing of single feature classes to export complete\")\r\n\r\n logging.info(\"mergeAreas.py script finished\\n\\n\")\r\n\r\n return", "def merge_recs(part_one, part_two, output):\n start_op_length = 28\n with open(part_one, 'rb') as a_handle, \\\n open(part_two, 'rb') as b_handle, \\\n open(output, 'wb') as merged:\n\n a_data = a_handle.read()\n b_data = b_handle.read()\n\n postgame_pos, _ = find_postgame(a_data, len(a_data))\n if postgame_pos:\n a_data_end = postgame_pos - LOOKAHEAD\n else:\n a_data_end = len(a_data)\n b_header_len, = struct.unpack('<I', b_data[:4])\n chapter = mgz.body.operation.build({\n 'type': 'action',\n 'op': 1,\n 'length': 2,\n 'action': {\n 'type': 'chapter',\n 'player_id': 0xff # our merge marker\n }\n })\n\n # part A with no postgame struct\n merged.write(a_data[:a_data_end])\n # chapter action\n merged.write(chapter)\n # offset to start of part B operations\n merged.write(struct.pack('<I', a_data_end + len(chapter) + b_header_len))\n # part B header (now a \"saved chapter\")\n merged.write(b_data[4:b_header_len])\n # part B operations with no start operation\n merged.write(b_data[b_header_len + start_op_length:])", "def merge(self, obj):\n mlist = self.selected_handles()\n \n if len(mlist) != 2:\n msg = _(\"Cannot merge citations.\")\n msg2 = _(\"Exactly two citations must be selected to perform a \"\n \"merge. A second citation can be selected by holding \"\n \"down the control key while clicking on the desired \"\n \"citation.\")\n ErrorDialog(msg, msg2)\n else:\n citation1 = self.dbstate.db.get_citation_from_handle(mlist[0])\n citation2 = self.dbstate.db.get_citation_from_handle(mlist[1])\n if not citation1.get_reference_handle() == \\\n citation2.get_reference_handle(): \n msg = _(\"Cannot merge citations.\")\n msg2 = _(\"The two selected citations must have the same \"\n \"source to perform a merge. 
If you want to merge \"\n \"these two citations, then you must merge the \"\n \"sources first.\")\n ErrorDialog(msg, msg2)\n else:\n MergeCitation(self.dbstate, self.uistate, mlist[0], mlist[1])", "def merge(self, branch_names):\n\n self.git(\"merge\", *branch_names)", "def merge_both_tables():\n old = Table.read('data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n wanted = Table.read('data/scocen_candidates_300k_only_spatial_cut.fits')\n additional = Table.read('data/scocen_candidates_300k_only_spatial_cut_200k_to_determine_bg_ols.fits')\n\n d_old = dict(zip(old['source_id'], old['background_log_overlap']))\n d_add = dict(zip(additional['source_id'], additional['background_log_overlap']))\n d_old.update(d_add)\n dct = d_old\n\n ln_bg_ols = [dct[source_id] for source_id in wanted['source_id']]\n print\n len(ln_bg_ols), len(wanted)\n\n wanted['background_log_overlap'] = ln_bg_ols\n print\n wanted\n\n wanted.write('data/scocen_candidates_300k_only_spatial_cut.fits', overwrite=True, format='fits')", "def MergeOptMerge(opt):\r\n merger = merge_optimizer\r\n opt = SeqOptimizer([merger, opt, merger])\r\n opt.name = \"MergeOptMerge\"\r\n return opt", "def __add__(self, other):\n if self.xml.find('mosromgrmeta') is None or isinstance(other, RunningOrderControl):\n return other.merge(self)\n raise MosCompletedMergeError(\"Cannot merge completed MOS file\")", "def merge(a, b):\r\n # your code here\r\n \r\n m = []\r\n i, j = 0, 0\r\n \r\n while i < len(a) and j < len(b):\r\n if a[i] < b[j]:\r\n m.append(a[i])\r\n i += 1\r\n else:\r\n m.append(b[j])\r\n j += 1\r\n \r\n m += a[i:] + b[j:]\r\n \r\n return m", "def merge(file_a, file_b):\n res = []\n a = read_bin_file_in_full(file_a)\n b = read_bin_file_in_full(file_b)\n pa = 0\n pb = 0\n while pa < len(a) and pb < len(b):\n if a[pa] < b[pb]:\n res.append(a[pa])\n pa += 1\n else:\n res.append(b[pb])\n pb += 1\n while pa < len(a):\n res.append(a[pa])\n pa += 1\n while pb < len(b):\n res.append(b[pb])\n pb += 1\n\n return write_to_temp(res)", "def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes", "def merge(self, op):\n self.__desc = listify(self.__desc, op.__desc)\n self.__name = listify(self.__name, op.__name)\n self.__label_pre = listify(self.__label_pre, op.__label_pre)\n self.__label_post = listify(self.__label_post, op.__label_post)", "def merge_result(self, k):\n self.build_pool()\n return self.merge(k)\n if not k in self._merged:\n self.build_pool()\n self._merged[k] = self.merge(k)\n return self._merged[k]", "def svn_client_merge(char_source1, svn_opt_revision_t_revision1, char_source2, svn_opt_revision_t_revision2, char_target_wcpath, svn_boolean_t_recurse, svn_boolean_t_ignore_ancestry, svn_boolean_t_force, svn_boolean_t_dry_run, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def up(job, inputFileID1, inputFileID2, memory=sortMemory):\n with job.fileStore.writeGlobalFileStream() as (fileHandle, outputFileStoreID):\n with job.fileStore.readGlobalFileStream( inputFileID1 ) as inputFileHandle1:\n with job.fileStore.readGlobalFileStream( inputFileID2 ) as inputFileHandle2:\n merge(inputFileHandle1, inputFileHandle2, fileHandle)\n job.fileStore.logToMaster( \"Merging %s and %s to %s\"\n % 
(inputFileID1, inputFileID2, outputFileStoreID) )\n #Cleanup up the input files - these deletes will occur after the completion is successful. \n job.fileStore.deleteGlobalFile(inputFileID1)\n job.fileStore.deleteGlobalFile(inputFileID2)\n return outputFileStoreID", "def imerge(*iterables):\n return _IMerge(iterables)", "def merge (*a_data) :\n i = 0\n for loc_data in a_data :\n i += 1\n if i == 1 :\n loc_new_df = loc_data\n else :\n loc_new_df = __pd.merge(loc_new_df,loc_data,left_index=True,right_index=True)\n return loc_new_df", "def merge(self, other, allow_duplicate=False, do_spaces=True, do_datasets=True, do_tasksets=True, do_results=True):\n #TODO: May need to organize a staging area to ensure this merge is atomic\n if self.mode == 'r': raise ValueError, \"Cannot merge into read-only store\"\n ignored_md = ['uuid', 'avg_learn', 'avg_classify', 'name', 'feature_name', 'class_name']\n\n space_direct_copy = [] # Spaces we copy directly, meaning the featuremap can be copied too\n space_feature_mapping = {}\n if do_spaces or do_datasets:\n # Must do spaces if we do datasets, because spaces may have been updated\n for space_node in ProgressIter(list(other.spaces), label='Copying spaces'):\n logger.debug(\"Considering space '%s'\", space_node._v_name)\n space_name = space_node._v_name\n if hasattr(self.spaces, space_name):\n logger.debug('Already had %s', space_name)\n src_space = other.get_Space(space_name)\n # Need to merge these. Feature spaces can be extended, but there is no mechanism for doing the same with class\n # spaces at the moment, so we must reject any that do not match. \n dst_space = self.get_Space(space_name)\n if src_space == dst_space:\n logger.debug(' Exact match')\n space_direct_copy.append(space_name)\n else:\n md = get_metadata(space_node)\n if md['type'] == 'class':\n raise ValueError, \"Cannot merge due to different versions of %s\" % str(md)\n elif md['type'] == 'feature':\n logger.debug(' Attempting to merge %s', str(md))\n # Reconcile the spaces. 
\n ## First we need to compute the new features to add\n new_feats = sorted(set(src_space) - set(dst_space))\n logger.debug(' Identified %d new features', len(new_feats))\n reconciled_space = dst_space + new_feats\n if len(new_feats) != 0:\n # Only need to extend if new features are found.\n self.extend_Space(space_name, reconciled_space)\n ## Now we need to build the mapping from the external space to ours\n space_index = dict( (k,v) for v,k in enumerate(reconciled_space))\n space_feature_mapping[space_name] = dict( (i,space_index[s]) for i,s in enumerate(src_space))\n else:\n raise ValueError, \"Unknown type of space\"\n else:\n self.fileh.copyNode(space_node, newparent=self.spaces)\n space_direct_copy.append(space_name)\n \n if do_datasets:\n for src_ds in ProgressIter(list(other.datasets), label='Copying datasets'):\n dsname = src_ds._v_name\n\n logger.debug(\"Considering dataset '%s'\", dsname)\n if hasattr(self.datasets, dsname):\n logger.warning(\"already had dataset '%s'\", dsname)\n dst_ds = getattr(self.datasets, dsname)\n # Failure to match instance_id is an immediate reject\n if dst_ds._v_attrs.instance_space != src_ds._v_attrs.instance_space:\n raise ValueError, \"Instance identifiers don't match for dataset %s\" % dsname\n # The hardest to handle is the feature data, since we may need to rearrange feature maps\n else:\n instance_space = other.get_DatasetMetadata(dsname)['instance_space']\n self.add_Dataset(dsname, instance_space, other.get_Space(dsname))\n dst_ds = getattr(self.datasets, dsname)\n\n node_names = ['class_data', 'sequence', 'tokenstreams']\n for name in node_names:\n logger.debug('Copying %s',name)\n if hasattr(src_ds, name):\n src_parent = getattr(src_ds, name)\n #TODO: may need to handle incomplete destination nodes\n dst_parent = getattr(dst_ds, name)\n for node in src_parent:\n if hasattr(dst_parent, node._v_name):\n logger.warning(\"already had '%s' in '%s'\", node._v_name, name)\n else:\n self.fileh.copyNode(node, newparent=dst_parent, recursive=True)\n else:\n logger.warning(\"Source does not have '%s'\", name)\n\n logger.debug('Copying feature_data')\n for node in src_ds.feature_data:\n space_name = node._v_name\n if hasattr(dst_ds.feature_data, space_name):\n logger.warning(\"already had '%s' in 'feature_data'\", space_name) \n elif space_name in space_direct_copy:\n # Direct copy the feature data because the destination store did not have this\n # space or had exactly this space\n logger.debug(\"direct copy of '%s' in 'feature_data'\", space_name)\n self.fileh.copyNode(node, newparent=dst_ds.feature_data, recursive=True)\n else:\n ax0 = node.feature_map.read(field='ax0')\n ax1 = node.feature_map.read(field='ax1')\n value = node.feature_map.read(field='value')\n feature_mapping = space_feature_mapping[space_name]\n\n feat_map = [ (i,feature_mapping[j],v) for (i,j,v) in zip(ax0,ax1,value)]\n self.add_FeatureDict(dsname, space_name, feat_map)\n\n \n # TASKS & RESULTS\n def __merge(datum, check):\n logger.debug(\"Copying %s\", datum)\n src_node = getattr(other, datum)\n dst_node = getattr(self, datum)\n for t in ProgressIter(list(src_node), label='Copying %s' % datum):\n logger.debug(\"Considering %s '%s'\", datum, t._v_name)\n\n # Check if the exact result has been previously copied\n if t._v_name in dst_node:\n logger.warn(\"Skipping previous %s: %s\", datum, t._v_name)\n else:\n md = get_metadata(t)\n for i in ignored_md: \n if i in md: \n del md[i]\n # Check for equivalent metadata\n if not allow_duplicate and check(md):\n logger.warn(\"Ignoring 
duplicate in %s: %s\", datum, str(md))\n else:\n try:\n self.fileh.copyNode(t, newparent=dst_node, recursive=True)\n except tables.NoSuchNodeError:\n logger.critical(\"Damaged node skipped\")\n\n if do_tasksets:\n # Copy entire nodes\n __merge('tasksets', self.has_TaskSet)\n # Now work our way through and check if any weights need updating\n for src in ProgressIter(other.get_TaskSets({}), label='Copying weights'):\n if src.node._v_name in self.tasksets:\n dst = StoredTaskSet(self, getattr(self.tasksets, src.node._v_name))\n else:\n md = dict(src.metadata)\n for i in ignored_md: \n if i in md: \n del md[i]\n dst = self.get_TaskSet(md)\n # sanity check for compatibility\n if len(src.tasks) != len(dst.tasks):\n logger.warning('number of tasks in src and dst do not match; skipping')\n continue\n for i, task in enumerate(src.tasks):\n dst.tasks[i].weights.update(src.tasks[i].weights)\n\n if do_results:\n __merge('results', self.has_TaskSetResult)", "def merge(self, a, b, path=None):\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n if key == 'attributes':\n self.merge_attribute_defs(b, a)\n else:\n self.merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n # raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n self.append_or_replace(a,b,key, '/'.join(path + [str(key)]));\n else:\n a[key] = b[key]\n return a", "def _do_merge(ext, exts_other):\n for ext_other in exts_other:\n if not ext.is_duplicate(ext_other):\n return False\n return True", "def merge(files, dst):\n\n if len(files) == 0:\n raise click.BadArgumentUsage(\"Please provide both input files and destination file\")\n\n if len(files) == 1:\n path = files[0]\n base, pattern = os.path.split(path)\n with fs.open_fs(base) as ffs:\n files = [\"{}{}\".format(base, match.path) for match in ffs.glob(pattern)]\n\n las_files = [pylas.read(openbin_file(f)) for f in IncrementalBar(\"Reading files\").iter(files)]\n\n try:\n with click_spinner.spinner():\n click.echo(\"Merging\")\n merged = pylas.merge(las_files)\n click.echo(\"Writing\")\n merged.write(openbin_file(dst, mode='w'), do_compress=dst.endswith('.laz'))\n\n except Exception as e:\n click.echo(click.style(str(e), fg=\"red\"))\n raise click.Abort()", "def merge_two_calls(self) -> None:", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. 
At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n 
#dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if ( k == 
'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0", "def change_merged(self, event):\n pass", "def merge_into(self, target):\n self._merge_feedbackset_into(target)\n self._merge_candidates_into(target)\n self._merge_examiners_into(target)\n self._merge_tags_into(target)\n self.delete()", "def merge_other(self, other):\n assert(not other.isSet())\n with self.__cond:\n if self.__isset:\n other.set(self.__data)\n else:\n self.__merged.append(other)", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def Unit_merge(*args):\n return _libsbml.Unit_merge(*args)", "def perform_dataframe_upload_merge(\n workflow,\n dst_df: pd.DataFrame,\n src_df: pd.DataFrame,\n merge_info: Dict,\n):\n # STEP 1 Rename the column names.\n src_df = src_df.rename(\n columns=dict(list(zip(\n merge_info['initial_column_names'],\n merge_info['rename_column_names']))))\n\n # STEP 2 Drop the columns not selected\n columns_to_upload = merge_info['columns_to_upload']\n src_df.drop(\n [\n col for idx, col in enumerate(list(src_df.columns))\n if not columns_to_upload[idx]\n ],\n axis=1,\n inplace=True)\n\n # If no keep_key_column value is given, initialize to True\n if 'keep_key_column' not in merge_info:\n kk_column = []\n for cname in merge_info['rename_column_names']:\n kk_column.append(pandas.is_unique_series(src_df[cname]))\n merge_info['keep_key_column'] = kk_column\n\n # Get the keys\n src_key = merge_info['src_selected_key']\n dst_key = merge_info['dst_selected_key']\n\n # STEP 3 Perform the combination\n # Separate the columns in src that overlap from those that do not\n # overlap, but include the key column in both data frames.\n overlap_names = set(dst_df.columns).intersection(src_df.columns)\n src_no_overlap_names = set(src_df.columns).difference(overlap_names)\n src_df_overlap = src_df[list(overlap_names.union({src_key}))]\n src_df_no_overlap = src_df[list(src_no_overlap_names.union({src_key}))]\n\n # Step A. Perform the merge of non-overlapping columns\n new_df = _perform_non_overlapping_column_merge(\n dst_df,\n src_df_no_overlap,\n merge_info,\n dst_key,\n src_key)\n\n # Step B. 
Perform the update with the overlapping columns\n new_df = _perform_overlap_update(\n new_df,\n src_df_overlap,\n dst_key,\n src_key,\n merge_info['how_merge'])\n\n # If the merge produced a data frame with no rows, flag it as an error to\n # prevent loosing data when there is a mistake in the key column\n if new_df.shape[0] == 0:\n raise Exception(gettext(\n 'Merge operation produced a result with no rows'))\n\n # If the merge produced a data frame with no unique columns, flag it as an\n # error to prevent the data frame from propagating without a key column\n if not pandas.has_unique_column(new_df):\n raise Exception(gettext(\n 'Merge operation produced a result without any key columns. '\n + 'Review the key columns in the data to upload.',\n ))\n\n # Store the result back in the DB\n pandas.store_dataframe(new_df, workflow)\n\n _update_is_key_field(merge_info, workflow)\n\n # Recompute all the values of the conditions in each of the actions\n for action in workflow.actions.all():\n action.update_selected_row_counts()", "def svn_client_merge2(char_source1, svn_opt_revision_t_revision1, char_source2, svn_opt_revision_t_revision2, char_target_wcpath, svn_boolean_t_recurse, svn_boolean_t_ignore_ancestry, svn_boolean_t_force, svn_boolean_t_dry_run, apr_array_header_t_merge_options, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def test_merge_max(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastLine', 'line'), 1011585)", "def test_merge(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.merge(fc1)\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.merge(fc2)\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def merge(df_list,merge_param_list):\n\n merged = funs.reduce(lambda left, right: pd.merge(left, right,how='inner', on=merge_param_list), df_list)\n\n return merged", "def merge(self):\n rdr = Reader(self.config)\n rdr.read_string(utils.paste(single_line=False))\n if len(rdr.get_entry_collection().entries) == 0:\n self.visual.error(\"Zero items extracted from the collection to merge.\")\n return\n eids = []\n for entry in rdr.get_entry_collection().entries.values():\n self.entry_collection.add_new_entry(entry)\n eids.append(entry.ID)\n self.selector.update_reference(self.reference_entry_id_list)\n # select them\n res = self.selector.select_by_id(eids)\n if res is None:\n self.visual.error(\"Failed to select merged entry!\")\n self.visual.log(\"Merged new entr{}:\".format(\"y\" if len(res) == 1 else \"ies\"))\n self.show_entries()", "def __gitCommitMerge(self):\n self.vcs.gitCommitMerge(self.project.getProjectPath())", "def test_04_merge_into(self):\n client = self.client\n\n # Download existing machines.\n machine_1 = check_json(client, 'api/db_default/v4/nts/machines/1')\n machine_3 = check_json(client, 'api/db_default/v4/nts/machines/3')\n # The test is boring if we don't have at least 1 run in each machine.\n self.assertTrue(len(machine_1['runs']) > 0)\n self.assertTrue(len(machine_3['runs']) > 0)\n\n data = {\n 'action': 'merge',\n 'into': '3',\n }\n resp = client.post('api/db_default/v4/nts/machines/1', data=data,\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 200)\n\n 
# Old machine should have disappeared.\n resp_2 = client.get('api/db_default/v4/nts/machines/1')\n self.assertEqual(resp_2.status_code, 404)\n\n # The other machine should have the union of all runs.\n machine_1['runs'] = [_hashabledict(run) for run in machine_1['runs']]\n machine_3['runs'] = [_hashabledict(run) for run in machine_3['runs']]\n allruns = set(machine_1['runs']).union(machine_3['runs'])\n resp_3 = check_json(client, 'api/db_default/v4/nts/machines/3')\n resp_3['runs'] = [_hashabledict(run) for run in resp_3['runs']]\n self.assertEqual(set(resp_3['runs']), allruns)", "def completeMerge(self):\n #--Remove lists that aren't the sum of at least two esps.\n srcMods = self.srcMods\n for levls in (self.levcs,self.levis):\n for listId in levls.keys():\n if len(srcMods[listId]) < 2 or levls[listId].isDeleted:\n self.records.remove(levls[listId])\n del levls[listId]\n del srcMods[listId]\n #--Log\n log = self.log\n for label, levls in (('Creature',self.levcs), ('Item',self.levis)):\n if not len(levls): continue\n log.setHeader(_('Merged %s Lists:') % (label,))\n for listId in sorted(levls.keys(),key=lambda a: a.lower() ):\n log(listId)\n for mod in srcMods[listId]:\n log(' '+mod)", "def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items", "def merge_content(self, other):\n self.__content += other.__content", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def merge(a, b):\n if isinstance(a, CONFIG_VALID) \\\n and isinstance(b, CONFIG_VALID):\n # dict update\n if isinstance(a, dict) and isinstance(b, dict):\n a.update(b)\n return a\n # list update\n _a = list(a)\n for x in list(b):\n if x not in _a:\n _a.append(x)\n return _a\n if a and b:\n raise Exception(\"Cannot merge\")\n raise NotImplementedError", "def __merge(self, year, month, day):\n print 'Merge...'\n logging.info('[merge]->Merge...')\n\n k7dir = self.aodSetting.k7_dir # path.join(baseDir, 'k7')\n mdir = self.aodSetting.merge_dir # path.join(baseDir, 'merge')\n t = datetime.datetime(year, month, day)\n\n stids = self.aodSetting.stations.getstIds()\n\n # Loop - merge k7 files for each station\n for stid in stids:\n stk7dir = path.join(\n k7dir, stid, t.strftime('%Y%m'), t.strftime('%d'))\n if not path.isdir(stk7dir):\n continue\n\n fns = glob.glob(path.join(stk7dir, '*.k7'))\n if len(fns) == 0:\n continue\n\n # check k7 and remove it if empty file\n for fn in fns:\n if path.getsize(fn) == 0:\n print 'Empty K7 [{0}] => {1} '.format(stid, fn)\n logging.info(\n '[merge]->Empty K7 [{0}] => {1}'.format(stid, fn))\n fns.remove(fn)\n\n stmdir = path.join(mdir, stid, t.strftime('%Y%m'))\n if not os.path.exists(stmdir):\n os.makedirs(stmdir)\n\n outfn = path.join(stmdir, stid + '_' +\n t.strftime('%Y%m%d') + '_merge.k7')\n spdata.merge_files(fns, outfn)\n print 'Merge [{0}] => {1}'.format(stid, outfn)\n logging.info('[merge]->Merge [{0}] => {1}'.format(stid, outfn))\n\n print 'Merge Done!'\n logging.info('[merge]->Merge Done!')", "def run_merge(*src, argv=None):\n tmpdir = tempfile.mkdtemp()\n inputs = []\n for i, drv in enumerate(src):\n if type(drv) != str:\n tmppath = os.path.join(tmpdir, \"input_%s.tif\" % 
i)\n drv.write(tmppath)\n inputs.append(tmppath)\n else:\n inputs.append(src)\n\n if \"-o\" in argv:\n outpath = argv[argv.index(\"-o\") + 1]\n else:\n outpath = os.path.join(tempfile.gettempdir(), \"%s.tif\" % next(tempfile._get_candidate_names()))\n logger.debug(\"Writing to file %s\" % outpath)\n argv += [\"-o\", outpath]\n argv = gdal.GeneralCmdLineProcessor(argv)\n options = argv + inputs\n assert run_external_app(\"gdal_merge.py\", options) == 0, \"Error running gdal_merge\"\n remove_directory(tmpdir)\n return gdal.Open(outpath)", "def db_merge(queryset, expected, key_fn):\n a = merge.key(queryset, key_fn)\n b = merge.key(expected, key_fn)\n new_objects = []\n del_objects = []\n fields = queryset.model._meta.get_fields()\n for (x, y) in merge.merge(a, b):\n if x is None: # add object\n new_objects.append(y)\n elif y is None: # delete object\n x.delete()\n else: # update object\n changed = False\n for field in fields:\n if field.auto_created:\n continue\n if getattr(x, field.name) != getattr(y, field.name):\n setattr(x, field.name, getattr(y, field.name))\n changed = True\n if changed:\n x.save()\n if new_objects:\n queryset.model.objects.bulk_create(new_objects)", "def rmerge(*colls):\n if isinstance(colls, tuple) and len(colls) == 1:\n # A squeeze operation since merge_with generates tuple(list_of_objs,)\n colls = colls[0]\n if all(is_mapping, colls):\n # Merges all the collections, recursively applies merging to the combined values\n return merge_with(rmerge, *colls)\n else:\n # If colls does not contain mappings, simply pick the last one\n return last(colls)" ]
[ "0.8324286", "0.775882", "0.7348654", "0.73391783", "0.711605", "0.6975439", "0.6975439", "0.6975439", "0.68910533", "0.6854348", "0.6818201", "0.67674536", "0.6687539", "0.6674524", "0.6658785", "0.6657529", "0.66195583", "0.65894455", "0.6566261", "0.65008974", "0.6500515", "0.649734", "0.648821", "0.6474986", "0.6433792", "0.6432626", "0.6401819", "0.63838875", "0.6375337", "0.6346526", "0.62883115", "0.6287753", "0.62627226", "0.62535256", "0.62162554", "0.62136286", "0.6202556", "0.6194198", "0.6192331", "0.6176665", "0.6171547", "0.6167215", "0.61476064", "0.61468023", "0.6100024", "0.60911155", "0.608956", "0.6078283", "0.60615426", "0.6051151", "0.6047925", "0.6039703", "0.60331804", "0.6029505", "0.59863454", "0.59667534", "0.59643686", "0.59582865", "0.59456694", "0.59414905", "0.59372574", "0.59324265", "0.5930379", "0.5929911", "0.59231186", "0.5922848", "0.5911672", "0.59015834", "0.58971983", "0.5890934", "0.5882175", "0.5873166", "0.58715945", "0.5871366", "0.5870386", "0.5865252", "0.58651525", "0.5863023", "0.58623314", "0.5860195", "0.5840533", "0.5836945", "0.58353025", "0.58281064", "0.5823734", "0.5818708", "0.58139867", "0.58114654", "0.58065546", "0.5804134", "0.5799928", "0.57993907", "0.5790916", "0.5786328", "0.57837534", "0.57756615", "0.5773679", "0.5772729", "0.5768964", "0.57686377", "0.57623273" ]
0.0
-1
Merge this Box with one or more other Box instances
def merge(self, box, in_place=True):
    if in_place:
        self._merge(box)
    else:
        base = self.copy()
        base._merge(box)
        return base
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _merge(self, box_list):\n if isinstance(box_list, self.__class__):\n box_list = [box_list]\n for box in box_list:\n for row in box:\n row[IND] = len(self)\n self.append(row)\n self._combine(row)", "def merge(self, other):\n\n for child in other.children:\n self.add_deep_copy_of(child, merged=True)", "def union(self, other):\n self.vertices.extend(other.vertices)\n self.edges.extend(other.edges)\n self.faces.extend(other.faces)\n return self", "def union(one, other):\n left = min(one.left, other.left)\n right = max(one.right, other.right)\n top = min(one.top, other.top)\n bottom = max(one.bottom, other.bottom)\n return BBox([[left, top], [right, bottom]])", "def append(self, other):\n for i in other.blocks:\n self.blocks.append(i)", "def mergeWith(self, others):", "def merge(self, obj):\n pass", "def combine(self, existing):\n return self", "def merge(self, other):\n\n assert self.ins_addr == other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def merge(self):\n elems = []\n for x in self.elems:\n if isinstance(x, self.__class__):\n elems.extend(x.merge().elems)\n else:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)", "def merge(self, other):\n for p in other:\n for key, val in p.items():\n self.contents[key] = val\n\n return self", "def merge(self, other: \"BB\") -> \"BB\":\n cp_bb = lib.cpBBMerge(self, other)\n return BB(cp_bb.l, cp_bb.b, cp_bb.r, cp_bb.t)", "def union_boxes(boxes: List[\"Box\"]) -> \"Box\":\n left, top, right, bottom = float(\"inf\"), float(\"inf\"), float(\"-inf\"), float(\"-inf\")\n for box in boxes:\n l, t, r, b = box.coordinates\n left = min(left, l)\n top = min(top, t)\n right = max(right, r)\n bottom = max(bottom, b)\n return Box(left, top, right - left, bottom - top)", "def add_box(self, box):\n mz_from = box.from_mz\n mz_to = box.to_mz\n rt_from = box.from_rt\n rt_to = box.to_rt\n self.boxes_mz.addi(mz_from, mz_to, box)\n self.boxes_rt.addi(rt_from, rt_to, box)", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def __add__(self, other):\n mesh = deepcopy(self)\n mesh.MergeWith(other)\n return mesh", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def Add(self, *args):\n return _Bnd.Bnd_Box_Add(self, *args)", "def mergeBboxes(bboxes, bboxes_prev):\n bboxes_merged = deepcopy(bboxes)\n for bbox in bboxes_prev:\n is_exist = False\n for bbox_merged in bboxes_merged:\n if bbox.object_id == bbox_merged.object_id:\n is_exist = True\n bbox_merged.visible = bbox.visible\n break\n if not is_exist:\n bboxes_merged.append(bbox)\n return bboxes_merged", "def extend(self, other):\n # iterate through other deque\n for item in other:\n # if the current item's data is 
None\n if item is None:\n # that deque is empty, so we're done\n break\n # if other deque has items, push back current item and loop\n self.push_back(item)", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def merge(self,other):\n if self.empty: \n self.copy(other)\n return self\n elif other.empty:\n return self\n if(other.vmin < self.vmin):\n self.vmin = other.vmin\n if(other.vmax > self.vmax):\n self.vmax = other.vmax\n\n nA = float(self.vcount)\n nB = float(other.vcount)\n nAB = nA*nB\n nAA = float(self.vcountsq)\n nBB = float(other.vcountsq)\n nX = nA+nB\n nXX = nX**2 #nAA+nBB+2*nAB #nX**2 # actually (nA+nB)^2 = (nAA+nBB+2*nAB)\n nXXX = nXX*nX\n self.vcount = nX\n self.vcountsq = nXX\n\n self.vsum += other.vsum;\n\n # merge of mean and m2\n delta = other.vmean-self.vmean;\n delta2 = delta**2\n delta3 = delta**3\n delta4 = delta**4\n self.vmean += delta*nB/nA\n self.vm2 += other.vm2 + delta2*(nAB/nX)\n self.vm3 += other.vm3 + delta3*(nAB*(nA-nB))/nXX + 3*delta*(nA*other.vm2-nB*self.vm2)/nX\n self.vm4 += other.vm4 + delta4*(nAB*(nAA-nAB+nBB))/nXXX + 6*delta2*(nAA*other.vm2+nBB*self.vm2)/nXX + 4*delta*(nA*other.vm3-nB*self.vm3)/nX\n self.dirty = True\n return self", "def _merge(self):\n raise NotImplementedError", "def union_of_bboxes(height: int, width: int, bboxes: Sequence[BoxType], erosion_rate: float = 0.0) -> BoxType:\n x1, y1 = width, height\n x2, y2 = 0, 0\n for bbox in bboxes:\n x_min, y_min, x_max, y_max = bbox[:4]\n w, h = x_max - x_min, y_max - y_min\n lim_x1, lim_y1 = x_min + erosion_rate * w, y_min + erosion_rate * h\n lim_x2, lim_y2 = x_max - erosion_rate * w, y_max - erosion_rate * h\n x1, y1 = np.min([x1, lim_x1]), np.min([y1, lim_y1])\n x2, y2 = np.max([x2, lim_x2]), np.max([y2, lim_y2])\n return x1, y1, x2, y2", "def union(self, other): # -> BaseGeometry:\n ...", "def merge(self, other: 'Basket') -> None:\n for item in other:\n try:\n existing = self.items.get(ref=item.ref)\n existing.quantity += item.quantity\n existing.save(update_fields=['quantity'])\n except item.DoesNotExist:\n item.basket = self\n item.save(update_fields=['basket'])\n other.delete()\n self._cached_items = None", "def merge(self, other):\n\n for n in other.cfg_nodes:\n self.insert_cfgnode(n)\n\n for ins_addr, outs in other.out_branches.items():\n if ins_addr in self.out_branches:\n for stmt_idx, item in outs.items():\n if stmt_idx in self.out_branches[ins_addr]:\n self.out_branches[ins_addr][stmt_idx].merge(item)\n else:\n self.out_branches[ins_addr][stmt_idx] = item\n\n else:\n item = next(iter(outs.values()))\n self.out_branches[ins_addr][item.stmt_idx] = item", "def extend(self, other):\n for x, y in other:\n self.add(x, y)", "def mergeWith(self, other):\n assert not other.synthesised\n self.globals.update(other.globals)\n self.signals.update(other.signals)\n self.startsOfDataPaths.update(other.startsOfDataPaths)\n self.subUnits.update(other.subUnits)\n \n for s in other.signals:\n s.ctx = self", "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def __iadd__(self, other):\n self.MergeWith(other)\n return self", "def __add__(self, other):\n 
self.__dict__.update(other)\n return self", "def combine(self, other) -> None:\n assert self.id_ == other.id_\n assert self.type_ == other.type_\n self.count += other.count", "def Add(self, *args):\n return _Bnd.Bnd_Box2d_Add(self, *args)", "def merge(self, other_btree):\n pass", "def extend(self, other):\n if len(self.vertices[0]) != len(other.vertices[0]):\n raise ValueError(\"Rank mismatch ({0} != \"\n \"{1})\".format(self.vertices.shape[1],\n other.vertices.shape[1]))\n if self._geotype != other._geotype:\n raise TypeError(\"Geometry mismatch ({0} != \"\n \"{1})\".format(self._geotype, other._geotype))\n\n self.vertices = np.vstack([self.vertices, other.vertices])\n self._cache = {}\n return self", "def __finalize__(self, other, method=None, **kwargs):\n self = super().__finalize__(other, method=method, **kwargs)\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n print(\"self\", name, self.au_columns, other.left.au_columns)\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n return self", "def merge_bowl(self):\n self.B += self.Bowl_bS\n self.Bc += self.Bowl_bC\n self.W += self.Bowl_WS\n self.Wc += self.Bowl_WS", "def merge_other(self, other):\n assert(not other.isSet())\n with self.__cond:\n if self.__isset:\n other.set(self.__data)\n else:\n self.__merged.append(other)", "def update(self, other):\n if not isinstance(other, (list, np.ndarray, IndexCollection)):\n other = [other]\n for item in other:\n self.add(item)\n return self", "def merge_content(self, other):\n self.__content += other.__content", "def test_merge_overlapping_boxes(self):\n # box2 contains box1\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n boxes = [box1, box2]\n merged_boxes, _ = dc.dock.binding_pocket.merge_overlapping_boxes(mapping,\n boxes)\n print(\"merged_boxes\")\n print(merged_boxes)\n assert len(merged_boxes) == 1\n assert merged_boxes[0] == ((1, 3), (1, 3), (1, 3))\n\n # box1 contains box2\n box1 = ((1, 3), (1, 3), (1, 3))\n box2 = ((1, 2), (1, 2), (1, 2))\n mapping = {box1: [1, 2, 3, 4, 5, 6], box2: [1, 2, 3, 4]}\n boxes = [box1, box2]\n merged_boxes, _ = dc.dock.binding_pocket.merge_overlapping_boxes(mapping,\n boxes)\n print(\"merged_boxes\")\n print(merged_boxes)\n assert len(merged_boxes) == 1\n assert merged_boxes[0] == ((1, 3), (1, 3), (1, 3))\n\n # box1 contains box2, box3\n box1 = ((1, 3), (1, 3), (1, 3))\n box2 = ((1, 2), (1, 2), (1, 2))\n box3 = ((1, 2.5), (1, 2.5), (1, 2.5))\n mapping = {\n box1: [1, 2, 3, 4, 5, 6],\n box2: [1, 2, 3, 4],\n box3: [1, 2, 3, 4, 5]\n }\n merged_boxes, _ = dc.dock.binding_pocket.merge_overlapping_boxes(mapping,\n boxes)\n print(\"merged_boxes\")\n print(merged_boxes)\n assert len(merged_boxes) == 1\n assert merged_boxes[0] == ((1, 3), (1, 3), (1, 3))", "def combine(self, other):\n # Copy and merge\n ppt = PPT()\n ppt.contents = dict(self.contents)\n ppt.merge(other)\n return ppt", "def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)", "def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child", "def __finalize__(self, other, method=None, **kwargs):\r\n # merge operation: using metadata of the left object\r\n if method == 
'merge':\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other.left, name, None))\r\n # concat operation: using metadata of the first object\r\n elif method == 'concat':\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\r\n else:\r\n for name in self._metadata:\r\n object.__setattr__(self, name, getattr(other, name, None))\r\n return self", "def extend(self, other_rollout):\n\n assert not self.is_terminal()\n assert all(k in other_rollout.fields for k in self.fields)\n for k, v in other_rollout.data.items():\n self.data[k].extend(v)\n self.last_r = other_rollout.last_r", "def merge(self, *other):\n # Compute union of Fingerprints\n union = set().union(self, *other)\n # Create new fingerprint from union\n result = super(Fingerprint, type(self)).__new__(type(self), union)\n # Set n_flows to combination of self and other\n result.__setattr__('n_flows', self.n_flows + sum(o.n_flows for o in other))\n # Return result\n return result", "def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )", "def union(self, other):\n self.find_set()._link(other.find_set())", "def add_box(self):\n self.scenes[self.current_scene].add_object(Box())\n self.redraw()", "def combine(self, other_blocks):\n if type(other_blocks) == type(self):\n other_blocks = [other_blocks];\n\n for block in other_blocks:\n for i in range(block.n_users):\n self.add_user(block.user_ids[i], block.user_points[i], False);\n \n self.update();", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def combine(self, other) -> None:\n assert self.linenum == other.linenum\n assert self.source == other.source\n if other.coverage >= 0:\n if self.coverage < 0:\n self.coverage = other.coverage\n else:\n self.coverage += other.coverage\n for branch in other.values():\n self.add_branch(branch)", "def union(self, *args):\n _ub = None\n for _obj in args:\n if _ub is None:\n _ub = self.bbox(_obj)\n else:\n _b = self.bbox(_obj)\n _x = np.sort(np.array([_b[:, 0], _ub[:, 0]]), axis=None)\n _y = np.sort(np.array([_b[:, 1], _ub[:, 1]]), axis=None)\n _ub = np.array([[_x[0], _y[0]], [_x[3], _y[3]]])\n return _ub", "def union(self, other):\n VERIFICATION.verify_type(other, Rect, \"union target must be Rect\")\n\n funcs = (min, max, min, max)\n union_tuple = self._apply_binary_funcs(other, funcs)\n return Rect(*union_tuple)", "def __iadd__(self, other):\n\t\t#print(\"iadd\")\t\t\n\t\t# merge other 
branch\t\t\n\t\tself.graph.update(other.graph)\n\t\tself.bottoms.update(other.bottoms)\n\t\tself.output_shape.update(other.output_shape)\n\t\tlayer_name = \"iadd_{}\".format(len(self.graph))\n\t\tself.graph[layer_name] = layer_name\n\t\tself.bottoms[layer_name] = [self.cur_id, other.cur_id]\n\t\tself.output_shape[layer_name] = self.cur_tensor.size()\n\t\tself.cur_id = layer_name\n\t\t# save memory\n\t\tdel other\t\t\n\t\treturn self", "def __init__(self, v1, v2, *opts, **kwargs):\n Item.__init__(self, \"box\", (v1, v2), opts, **kwargs)", "def __iadd__(self, other):\n if not isinstance(other, IDStruct):\n raise TypeError(\"other is not of type IDStruct\")\n for left, right in other:\n self.add(left, right)\n # retain debug information\n self.transfer_debug(left, other)\n return self", "def add_image(self, other):\n newcls = self.__class__(None)\n newcls.polygon = self.union(other)\n\n newcls._members = []\n for v in self.members:\n newcls._members.append(v)\n for v in other.members:\n if v not in newcls._members:\n newcls._members.append(v)\n\n if self.is_mf_mosaic or other.is_mf_mosaic:\n newcls._update_mosaic_flag_id(True)\n else:\n newcls._update_mosaic_flag_id(None)\n\n return newcls", "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs", "def merge(self, skel):\n return Skeleton.simple_merge((self, skel)).consolidate()", "def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat", "def copy_with_children(self, new_children):\n new_box = self.copy()\n new_box.children = list(new_children)\n\n # Clear and reset removed decorations as we don't want to keep the\n # previous data, for example when a box is split between two pages.\n self.remove_decoration_sides = set()\n\n return new_box", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def update(self, other):\n _merge_dicts(self, other)", "def merge(self, queue2):\n with self.mutating:\n self.queue = list(heapq.merge(self.queue, queue2))\n heapq.heapify(self.queue)", "def __add__(self, other):\n try:\n self.cards += other.cards\n except AttributeError:\n is_a_card = [isinstance(card, Card) for card in other]\n if all(is_a_card):\n self.cards += other\n else:\n raise NotImplementedError(\n \"Only a Deck/View, or list of Cards can be added to this class\"\n )\n return self", "def _set_boxes(self, listOfBoxes):\n self._boxes = listOfBoxes", "def _MixCollections(bc1, bc2):\n new = _BinsCollection()\n for i, b1 in enumerate(bc1.GetBins()):\n b2 = bc2.GetBin(i)\n new_bin = binner.Bin()\n new_bin.SetFixedMean(b1.GetFixedMean())\n assert all(b1.GetFixedMean() == b2.GetFixedMean())\n for p in b1.GetPoints():\n new_bin.AddPoint(p)\n for p in b2.GetPoints():\n new_bin.AddPoint(p)\n new.AddBin(new_bin)\n return new", "def __iadd__(self, other):\n self.children.append(other)\n return self", "def union(self, other):\n if isinstance(other, list):\n 
self.substrates = unary_union([self.substrates] + other)\n elif isinstance(other, Substrate):\n self.substrates = unary_union([self.substrates, other.substrates])\n else:\n self.substrates = unary_union([self.substrates, other])\n self.oriented = False", "def refresh(self):\n\n assets = sorted(self._get_assets())\n self._assets_box.populate(assets)\n\n subsets = sorted(self._get_subsets())\n self._subsets_box.populate(subsets)\n\n representations = sorted(self._get_representations())\n self._representations_box.populate(representations)", "def merge(self, other):\n merged = copy.deepcopy(self.__dict__())\n for k, v in other.__dict__():\n if k in merged and getattr(self, k):\n if isinstance(v, (string_types, bool)):\n pass\n else:\n list_of_stuff = merged.get(k, [])\n for entry in v:\n if entry not in list_of_stuff:\n list_of_stuff.append(entry)\n merged[k] = list_of_stuff\n else:\n merged[k] = v\n return CondaEnvironmentProvider(**merged)", "def mergeGroup(self):\n if len(self) < 2:\n return\n mainItem = self[0]\n for item in self[1:]:\n mainItem.textLines.extend(item.textLines)\n mainItem.height = reduce(lambda x,y: x+y, [item.height for item in\n self])", "def _merge_raw(self, other):\n if other is None:\n variables = OrderedDict(self.variables)\n else:\n # don't align because we already called xarray.align\n variables = merge_coords_without_align(\n [self.variables, other.variables])\n return variables", "def __add__(self, other):\n assert isinstance(other, HStruct)\n return HStruct(*self.fields, *other.fields)", "def merge(self, other):\n self._mergeKeys(other)\n self._binaryOperationCheck(other)\n for id in self.clock.keys():\n print id\n self.clock[id] = max(self.clock[id], other.clock[id])", "def __add__(self, other):\n new = self.__class__()\n new.extend(self)\n new.extend(other)\n return new", "def __init__(self,\n mass_1, mass_2,\n width_1, width_2,\n x0_1, x0_2,\n v0_1=0, v0_2=0,\n h=0.1):\n self.box_1 = box.box(mass_1, width_1, x0_1, v0_1)\n self.box_2 = box.box(mass_2, width_2, x0_2, v0_2)\n self.h = h\n self.coll_counter = 0", "def hallucinate_merge(self, other):\n res = CompleteVec(None,None,self.max_num_samples)\n res.needs_update = True\n return res", "def merge(self, other: ProjectMeta) -> ProjectMeta:\n return self.clone(\n obj_classes=self._obj_classes.merge(other.obj_classes),\n tag_metas=self._tag_metas.merge(other._tag_metas),\n )", "def __isub__(self, other):\n\t\t#print(\"isub\")\n\t\t# merge other branch\n\t\tself.graph.update(other.graph)\n\t\tself.bottoms.update(other.bottoms)\n\t\tself.output_shape.update(other.output_shape)\n\t\tlayer_name = \"sub_{}\".format(len(self.graph))\n\t\tself.graph[layer_name] = layer_name\n\t\tself.bottoms[layer_name] = [self.cur_id, other.cur_id]\n\t\tself.output_shape[layer_name] = self.cur_tensor.size()\n\t\tself.cur_id = layer_name\n\t\t# save memory\n\t\tdel other\n\t\treturn self", "def union(self, other):\n from sage.misc.misc import deprecation\n deprecation('The function union is replaced by convex_hull.', 'Sage Version 4.4.4')\n return self.convex_hull(other)", "def _merge_proposal_boxes_into_roidb(self, roidb, box_list):\n assert len(box_list) == len(roidb)\n for i, entry in enumerate(roidb):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n gt_overlaps = np.zeros(\n (num_boxes, entry['gt_overlaps'].shape[1]),\n dtype=entry['gt_overlaps'].dtype\n )\n box_to_gt_ind_map = -np.ones(\n (num_boxes), dtype=entry['box_to_gt_ind_map'].dtype\n )\n\n # Note: unlike in other places, here we intentionally include all gt\n # rois, 
even ones marked as crowd. Boxes that overlap with crowds will\n # be filtered out later (see: _filter_crowd_proposals).\n gt_inds = np.where(entry['gt_classes'] > 0)[0]\n if len(gt_inds) > 0:\n gt_boxes = entry['boxes'][gt_inds, :]\n gt_classes = entry['gt_classes'][gt_inds]\n proposal_to_gt_overlaps = bbox_overlaps(\n boxes.astype(dtype=np.float32, copy=False),\n gt_boxes.astype(dtype=np.float32, copy=False)\n )\n # Gt box that overlaps each input box the most\n # (ties are broken arbitrarily by class order)\n argmaxes = proposal_to_gt_overlaps.argmax(axis=1)\n # Amount of that overlap\n maxes = proposal_to_gt_overlaps.max(axis=1)\n # Those boxes with non-zero overlap with gt boxes\n I = np.where(maxes > 0)[0]\n # Record max overlaps with the class of the appropriate gt box\n gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]\n entry['boxes'] = np.append(\n entry['boxes'],\n boxes.astype(entry['boxes'].dtype, copy=False),\n axis=0\n )\n entry['gt_classes'] = np.append(\n entry['gt_classes'],\n np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)\n )\n entry['seg_areas'] = np.append(\n entry['seg_areas'],\n np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)\n )\n entry['gt_overlaps'] = np.append(\n entry['gt_overlaps'].toarray(), gt_overlaps, axis=0\n )\n entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])\n entry['is_crowd'] = np.append(\n entry['is_crowd'],\n np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)\n )\n entry['box_to_gt_ind_map'] = np.append(\n entry['box_to_gt_ind_map'],\n box_to_gt_ind_map.astype(\n entry['box_to_gt_ind_map'].dtype, copy=False\n )\n )", "def merge_assets(self, other):\n for asset in other.asset:\n if find_elements(root=self.asset, tags=asset.tag,\n attribs={\"name\": asset.get(\"name\")}, return_first=True) is None:\n self.asset.append(asset)", "def extend(self, objects: Iterable[Any]) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.extend(list(map(panel, objects)))\n self.objects = new_objects", "def Add(self, *args):\n return _Bnd.Bnd_BoundSortBox2d_Add(self, *args)", "def update(self, other):\n if not other:\n return\n for o in other:\n self.replace(o)", "def merge_overlapping(boxes, max_overlap=0.05):\n def overlaps(bi, bj):\n return (bi.overlap(bj) >= max_overlap\n or bj.overlap(bi) >= max_overlap)\n\n def merge_into(boxes, box):\n overlapping = [b for b in boxes if overlaps(box, b)]\n if (len(overlapping) == 0):\n return (boxes + [box])\n else:\n preserved = [b for b in boxes if not overlaps(box, b)]\n merged = covering_box(overlapping + [box])\n return (merge_into(preserved, merged))\n\n boxes_merged = []\n for b in boxes:\n boxes_merged = merge_into(boxes_merged, b)\n return boxes_merged", "def _merge_inplace(self, other):\n if other is None:\n yield\n else:\n # don't include indexes in priority_vars, because we didn't align\n # first\n priority_vars = OrderedDict(\n (k, v) for k, v in self.variables.items() if k not in self.dims)\n variables = merge_coords_without_align(\n [self.variables, other.variables], priority_vars=priority_vars)\n yield\n self._update_coords(variables)", "def merge(self, obj, **kwargs):\r\n raise NotImplementedError\r\n # if type(obj) == StreamFork:\r\n # node = obj.node\r\n # else:\r\n # node = obj\r\n #\r\n # self.stream.append(node)\r\n #\r\n # merge = MergeNode(**kwargs)\r\n # self.stream.append(merge)\r\n # self.stream.connect()\r", "def merge(self, parallel_seq):\n return VSeq(self._elements + parallel_seq._elements)", 
"def canBeMergedWith(self, other):", "def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes", "def __add__(self, other):\n if not other:\n return self.clone()\n else:\n return self.using(join(self, other))" ]
[ "0.68560404", "0.6515574", "0.64632297", "0.63198626", "0.6211465", "0.62063205", "0.6064046", "0.5991924", "0.5917754", "0.5910379", "0.5859801", "0.58339703", "0.58313894", "0.57778674", "0.5742428", "0.57081246", "0.5687884", "0.56496143", "0.563442", "0.56266993", "0.5626118", "0.56121784", "0.5605912", "0.55999947", "0.558066", "0.5579901", "0.55539656", "0.55492246", "0.55463415", "0.5532292", "0.55274385", "0.55166984", "0.55111235", "0.55025864", "0.5495632", "0.54875505", "0.5485282", "0.54844904", "0.54804516", "0.54755014", "0.5471249", "0.54662156", "0.54543144", "0.54477185", "0.54377425", "0.5405874", "0.5393346", "0.53775704", "0.5362547", "0.53556585", "0.5326594", "0.5319401", "0.531517", "0.5305435", "0.5283096", "0.52738667", "0.52603", "0.5255089", "0.52547336", "0.5251983", "0.5243454", "0.52221024", "0.5211671", "0.51891744", "0.51881635", "0.5176953", "0.5162265", "0.5156755", "0.5145618", "0.5144794", "0.5140323", "0.51342314", "0.51297146", "0.5127975", "0.5127165", "0.511603", "0.511429", "0.5111478", "0.5109514", "0.51000535", "0.5089872", "0.5085532", "0.50841886", "0.50830805", "0.5063747", "0.50590914", "0.50492245", "0.5044724", "0.50413334", "0.50412774", "0.5037737", "0.503747", "0.5032055", "0.50259835", "0.5024475", "0.50211346", "0.5019208", "0.5015473", "0.501308", "0.5009596" ]
0.7087718
0
List the dependent data for each key where all keys are present
def vectors(self, keys, dct=None, labels="str", combine=True, indep_keys=None):
    keys = listify(keys)
    combined = self.combined() if combine else self
    filtered = self.filtered(keys, lst=combined)
    labels = "dict" if indep_keys is not None else labels
    if dct is not None:
        filtered = self.where(dct, filtered)
    out = {k: [] for k in keys}
    label_list = []
    for dct in filtered:
        indep = dct[INDEP].copy()
        keys_to_find = []
        for k in keys:
            if k in indep:
                out[k].append(indep.pop(k))
            else:
                keys_to_find.append(k)
        if labels == "str":
            label = dict_to_str(indep, val_sep="=", key_sep=", ")
        else:
            label = indep
        label_list.append(label)
        dep = dct[DEP]
        for k in keys_to_find:
            out[k].append(dep[k])
    lst_out = [out[k] for k in keys]
    if labels is not None and labels is not False:
        if indep_keys is None:
            lst_out.append(label_list)
        else:
            for k in indep_keys:
                lst_out.append([d[k] for d in label_list])
    return tuple(lst_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keys(self, dependent=True, independent=False):\n out = set()\n for row in self:\n if independent:\n out.update(row[INDEP].keys())\n if dependent:\n out.update(row[DEP].keys())\n return out", "def deps_for(nodes, key):\n\n def _deps(key, path):\n if key not in nodes:\n return [key]\n\n if key in path:\n msg = \"Cycle detected between {} and {}\".format(\n path[0], path[-1])\n raise GraphError(msg)\n\n deps = nodes[key][\"required\"]\n trans = [_deps(dep, path + [key]) for dep in deps]\n return set(util.concat(deps, *trans))\n\n return _deps(key, [])", "def dependent_keys(tasks, complete=False):\n out = set()\n errors = set()\n stack = list(tasks)\n while stack:\n ts = stack.pop()\n key = ts.key\n if key in out:\n continue\n if not complete and ts.who_has:\n continue\n if ts.exception is not None:\n errors.add(key)\n if not complete:\n continue\n\n out.add(key)\n stack.extend(ts.dependencies)\n return out, errors", "def dep_tree(self, root):\n \n graph = {}\n for key,extract in self.extracts.items():\n graph[key] = set(extract.get('depends',[]))\n \n def _recurse(node):\n l = set([node])\n for n in graph[node]:\n l = l | _recurse(n)\n \n return l\n \n return _recurse(root)", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def get_run_dependencies(dependency_list):\n for dependency in dependency_list:\n if isinstance(dependency, dict):\n for key in dependency:\n yield key\n else:\n yield dependency", "def _object_requires_hdf5_recurse(curr: NDict, str_base: str = \"\") -> List[str]:\n keys = curr.keypaths()\n ans = []\n for k in keys:\n data = curr[k]\n if _object_requires_hdf5_single(data):\n ans.append(k)\n return ans", "def hold_keys(dsk, dependencies):\n dependents = reverse_dict(dependencies)\n data = {k for k, v in dsk.items() if type(v) not in (tuple, str)}\n\n hold_keys = list(data)\n for dat in data:\n deps = dependents[dat]\n for dep in deps:\n task = dsk[dep]\n # If the task is a get* function, we walk up the chain, and stop\n # when there's either more than one dependent, or the dependent is\n # no longer a get* function or an alias. 
We then add the final\n # key to the list of keys not to fuse.\n if _is_getter_task(task):\n try:\n while len(dependents[dep]) == 1:\n new_dep = next(iter(dependents[dep]))\n new_task = dsk[new_dep]\n # If the task is a get* or an alias, continue up the\n # linear chain\n if _is_getter_task(new_task) or new_task in dsk:\n dep = new_dep\n else:\n break\n except (IndexError, TypeError):\n pass\n hold_keys.append(dep)\n return hold_keys", "def dependents_key(self):\n return self.dependents_key_for(self.id)", "def combinations(self, key_list, lst=None):\n lst = self.filtered(key_list, lst)\n tups = [tuple([d[INDEP].get(k, d[DEP].get(k)) for k in key_list]) for d in lst]\n s = set(tups)\n l = list(s)\n l.sort()\n return [{k: v for k, v in zip(key_list, vals)} for vals in l]", "def get_all_dicts_by_key(pcb_data: List[Dict[str, Any]], key: str) -> List[Dict[str, Any]]:\n res: List[Dict[str, Any]] = list()\n for d in pcb_data:\n if isinstance(d, dict) and key in d.keys():\n res.append(d)\n return res", "def iterkeyrefs(self):\r\n return self.data.iterkeys()", "def iterkeyrefs(self):\r\n return self.data.iterkeys()", "def get_important_nodes(self, key=None):\n if not key:\n node = self._root\n else:\n node = self.get_node(key)\n\n if node:\n for n in node:\n if n.is_distributed() or n.has_data():\n yield n", "def get_dependencies(self, target, graph, dep_list):\n \n if graph == OrderedDict(): return\n if target in graph:\n dep_list.append(graph)\n return dep_list\n for key in graph:\n self.get_dependencies(target, graph[key], dep_list)\n return dep_list", "def _get_equivalences(self, key):\n return (\n self._graph[self._key_to_node_index[key]].equivs\n if key in self._key_to_node_index\n else []\n )", "def keyrefs(self):\r\n return self.data.keys()", "def keyrefs(self):\r\n return self.data.keys()", "def compute_dependencies(tables):\n tables = list(tables)\n graph = {}\n def visit_foreign_key(fkey):\n if fkey.use_alter:\n return\n parent_table = fkey.column.table\n if parent_table in tables:\n child_table = fkey.parent.table\n if parent_table is not child_table:\n graph.setdefault(parent_table, []).append(child_table)\n\n for table in tables:\n visitors.traverse(table,\n {'schema_visitor': True},\n {'foreign_key': visit_foreign_key})\n\n graph.setdefault(table, []).extend(table._extra_dependencies)\n\n return graph", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def list_defect_ref_keys(self):\n print('=======')\n print('DEFECT_REFs')\n print('=======')\n for key in self.defect_refs:\n print(key)", "def get_data_nodes(self, key=None):\n if not key:\n node = self._root\n else:\n node = self.get_node(key)\n\n if node:\n for n in node:\n if n.has_data():\n yield n", "async def parse(self, key) -> List[dict]:\n data = await self._get_data()\n output = []\n for group in data:\n for series in group.get(key, []):\n output.append(series)\n return output", "def secondary_keys_dicts(self):", "def list(self):\n return list(sorted(self.manager.data[\"dataset\"].keys()))", "def dependent_ids(self) -> List[str]:\n return list(map(as_text, self.connection.smembers(self.dependents_key)))", "async def get_all(self, key: datastore.Key) -> RV:\n\t\treturn await (await self.get(key)).collect() # type: ignore[return-value]", "def getDepList(self, dict):\n \n if( dict.has_key( 
self.name) ):\n return\n else:\n dict[ self.name ] = self.installPath\n\n if( len( dict ) > 1 ):\n mods = self.reqmodules + self.optmodules\n else:\n mods = self.reqmodules + self.optmodules + self.reqmodules_buildonly\n \n for modname in mods:\n if( self.parent.module(modname) != None ):\n self.parent.module(modname).getDepList( dict )", "def mandatory_info(self):\n return [info.key for info in self.entry_info if not info.optional]", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def getall(self, key):\n return self.values.get(key, [])", "def keyrefs(self):\n return list(self.data)", "def keys(self):\n return [entry.key for entry in self.table if entry.value is not None]", "def collect_keys(self, list_):\n keys = list_.keys[:]\n todo = [list_]\n while 1:\n node = todo.pop()\n refs = []\n for ch in node.children:\n if ch.name == \"ref\": refs.append(ch)\n elif ch.name == \"element\" and ch.attr[\"name\"] in keys:\n k = ch.attr[\"name\"]\n list_.keymap[k] = ch\n keys.remove(k)\n if not keys: break\n for r in refs:\n d = self.defs[r.attr[\"name\"]]\n d.ref = r\n todo.append(d)\n for k in list_.keymap:\n out = list_.keymap[k]\n in_ = []\n while out.parent != list_:\n chs = out.parent.children[:]\n pos = chs.index(out)\n chs[pos:pos+1] = in_\n in_ = chs\n out = out.parent.ref\n pos = list_.children.index(out)\n list_.children[pos:pos+1] = in_", "def get_ordered_dependency_list(self):\n # Validate the graph\n self.validate()\n # Generate the dependency list\n dep_list = []\n for rosdep_key in self:\n if self[rosdep_key]['is_root']:\n dep_list.extend(self.__get_ordered_uninstalled(rosdep_key))\n # Make the list unique and remove empty entries\n result = []\n for item in dep_list:\n if item not in result and item[1] != []:\n result.append(item)\n # Squash the results by installer_key\n squashed_result = []\n previous_installer_key = None\n for installer_key, resolved in result:\n if previous_installer_key != installer_key:\n squashed_result.append((installer_key, []))\n previous_installer_key = installer_key\n squashed_result[-1][1].extend(resolved)\n return squashed_result", "def extract_deps(self, srcinfo):\n packages = {}\n pkgname = \"\"\n\n for i in srcinfo.split(\"\\n\"):\n if not i:\n continue\n if i[0] == \"#\":\n continue\n option = i.strip()\n key, value = option.split(\" = \")\n if key == \"pkgbase\":\n pkgname = value\n packages[pkgname] = []\n if key == \"makedepends\":\n packages[pkgname].append(value)\n # if key == \"depends\":\n # packages[pkgname].append(value)\n return packages", "def toposort2( data ):\n\n from functools import reduce\n\n # Ignore self dependencies.\n for k, v in data.items( ):\n v.discard( k )\n # Find all items that don't depend on anything.\n extra_items_in_deps = reduce( set.union, data.itervalues( ) ) - set( data.iterkeys( ) )\n # Add empty dependences where needed\n data.update( { item: set( ) for item in extra_items_in_deps } )\n while True:\n ordered = set( item for item, dep in data.iteritems( ) if not dep )\n if not ordered:\n break\n yield ordered\n data = { item: (dep - ordered)\n for item, dep in data.iteritems( )\n if item not in ordered }\n assert not data, \"Cyclic dependencies exist among these items:\\n%s\" % '\\n'.join(\n repr( x ) for x in data.iteritems( ) )", "def showDepend(self,childName):\n\tdList,idList,dict,dictId,graph=self.getAllParents(childName)\n# print dict\n# print dictId\n\tif dList:\n\t print \"\\nFor '%s' we found the following versions:\"%childName\n\t space = \"\"\n for item in 
dList:\n\t print item\n# if not len(space):\n# print \"%s %s\"%(space,item)\n# else:\n# print \"%s |-> %s\"%(space,item)\n# space=\" \"+space\n\telse:\n\t print \"No such data version found\",childName\n\treturn", "def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result", "def toposort(data):\n\n\n # Ignore self dependencies.\n for k, v in data.items():\n v.discard(k)\n # Find all items that don't depend on anything.\n extra_items_in_deps = \\\n reduce(set.union, data.itervalues()) - set(data.iterkeys())\n # Add empty dependences where needed\n data.update({item:set() for item in extra_items_in_deps})\n while True:\n ordered = set(item for item, dep in data.iteritems() if not dep)\n if not ordered:\n break\n yield ordered\n data = {item: (dep - ordered)\n for item, dep in data.iteritems()\n if item not in ordered}\n assert not data, \\\n \"Cyclic dependencies exist among these items:\\n{}\".format(\n '\\n'.join(repr(x) for x in data.iteritems()))", "def minimal(self):\n combined = self._combined\n out = []\n for k, d in combined.items():\n dct = d[INDEP].copy()\n dct.update(d[DEP])\n out.append(dct)\n return out", "def keys(self):\n list_all_dict = self.list_all()\n return list_all_dict[\"nodes\"] + list_all_dict[\"groups\"]", "def getDependencies(self) -> Mapping[Any, Any]:\n ret: Dict[Any, Any] = {}\n for dep in self.getDependenciesList():\n ret[dep['name']] = dep\n return ret", "def keys(self):\n # TODO: Collect all keys in each of the buckets\n all_keys = [] # Will store all the key\n for bucket in self.buckets:\n for key in bucket:\n if key is not None:\n all_keys.append(key[0])\n return all_keys", "def get_build_dependencies(dependency_list):\n for dependency in dependency_list:\n if isinstance(dependency, dict):\n for value_or_list in list(dependency.values()):\n if isinstance(value_or_list, list):\n for value in value_or_list:\n yield value\n else:\n yield value_or_list\n else:\n yield dependency", "def get_decs(self):\n return [i for i in self.decisions.keys()]", "def print_data():\r\n\r\n d = data()\r\n for i in d:\r\n for key, value in i.items():\r\n print(key, \" : \", value)\r\n print()", "def values(self):\n # TODO: Collect all values in each of the buckets\n all_values = [] # Will store all the key\n\n for bucket in self.buckets:\n for value in bucket:\n if value is not None:\n all_values.append(value[1])\n return all_values", "def getAllKeyValuePair(self,root,key):\n\n if root==None:\n return []\n \n node = root\n result = []\n\n for index,child in enumerate(node.children):\n if(child!=None):\n if(child.value!=None):\n result.append((key+str(index),child.value.value))\n \n result += self.getAllKeyValuePair(child,key+str(index))\n\n return result", "def find_dependants_recurse(key, rev_tree, previous=None):\n if previous is None:\n previous = set()\n if not key in rev_tree:\n return []\n this_level_dependants = set(rev_tree[key])\n next_level_dependants = set()\n for dependant in this_level_dependants:\n if dependant in previous:\n continue\n tmp_previous = previous.copy()\n tmp_previous.add(dependant)\n next_level_dependants.update(\n find_dependants_recurse(dependant, rev_tree,\n previous=tmp_previous,\n ))\n # ensures reloading order on the final list\n # by postponing the reload of modules in this level\n # that also appear later on the tree\n dependants = (list(this_level_dependants.difference(\n 
next_level_dependants)) +\n list(next_level_dependants))\n return dependants", "def get_data_names(self):\n return list(self.__data.keys())", "def show_values():\n dic_drg = {}\n dic_age = {}\n dic_sex = {}\n dic_sline = {}\n for tup in all_data:\n drg = tup[7]\n age = tup[9]\n sex = tup[10]\n sline = tup[14]\n\n dic_drg[drg] = 1\n dic_age[age] = 1\n dic_sex[sex] = 1\n dic_sline[sline] = 1\n\n print \"Age values\"\n for key in sorted(dic_age.keys()):\n print key\n\n print \"Sex values\"\n for key in sorted(dic_sex.keys()):\n print key\n\n print \"Service line values\"\n for key in sorted(dic_sline.keys()):\n if key is None or len(key) == 0:\n continue\n print \"'\" + key + \"',\",\n print\n\n print \"Drg values\"\n for key in sorted(dic_drg.keys()):\n if key is None or len(key) == 0:\n continue\n print\"'\" + key + \"',\",\n print", "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def getDependenciesList(self) -> List[Mapping[Any, Any]]:\n if self._dependencyList is not None:\n return self._dependencyList\n\n chartfile = self.getChartFile()\n if chartfile['apiVersion'] == 'v2':\n if 'dependencies' in chartfile:\n self._dependencyList = chartfile['dependencies']\n else:\n self._dependencyList = []\n elif chartfile['apiVersion'] == 'v1':\n self.readArchiveFiles()\n if self._archiveFiles is not None and 'requirements.yaml' in self._archiveFiles:\n self._dependencyList = self._getFile('requirements.yaml')['dependencies']\n else:\n self._dependencyList = []\n else:\n raise ConfigurationError('Unknown chart file version: {}'.format(chartfile))\n return self._dependencyList", "def list_values(key):\n return meta.list_values(key=key)", "def _get_keys(self, ckey):\n if self.has_key(ckey):\n doc = self[ckey]\n else:\n doc = [o for o in self.get_values(ckey)]\n if isinstance(doc, dict):\n for key in doc.keys():\n if ckey.rfind('%s.' % key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n else:\n yield ckey\n elif isinstance(doc, list):\n for item in doc:\n if isinstance(item, dict):\n for key in item.keys():\n if ckey.rfind('%s.' 
% key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n elif isinstance(item, list):\n for elem in item:\n if isinstance(elem, dict):\n for kkk in elem.keys():\n yield '%s.%s' % (ckey, kkk)\n else:\n yield ckey\n else: # basic type, so we reach the end\n yield ckey\n else: # basic type, so we reach the end\n yield ckey", "def generate_data(item, target='key' or 'value'):\n data = []\n target_is_key = target == 'key'\n for key, value in OrderedDict(sorted(item.items())).items():\n if target_is_key:\n data.append(key)\n continue\n\n # For empty list we are just writing an empty string ''.\n if isinstance(value, list) and not len(value):\n value = ''\n\n data.append(value)\n\n return data", "def values(self):\n for key in self.metadb.values():\n yield key, self.datadb[key]", "def get_keys_from_list():\n json_data = request.get_json()\n\n d = dict()\n d['elements'] = list()\n settings.setOptionsFile(get_info('uid'))\n fn = settings.getHistoROOTFileName()\n rfn = settings.getReferenceROOTFileName()\n# open root file stored in the root database\n f = ROOT.TFile(fn)\n# open reference root file stored in the root database\n rf = ROOT.TFile(rfn)\n\n for values in json_data.itervalues():\n for k in values:\n subd = dict()\n subd[\"index\"] = k[\"index\"]\n if fn != k[\"file\"]: \n fn = k[\"file\"]\n settings.setHistoROOTFileName(fn)\n f = ROOT.TFile(fn)\n print \"histogram :>>>>>: \",k[\"histogram\"]\n subd[\"data\"] = eval(cppyy.gbl.getDictionary(f,k[\"histogram\"]))\n if rfn != k[\"referenceFile\"]: \n rfn = k[\"referenceFile\"]\n settings.setReferenceROOTFileName(rfn)\n rf = ROOT.TFile(rfn)\n subd[\"refdata\"] = eval(cppyy.gbl.getDictionary(rf,k[\"reference\"]))\n d['elements'].append(subd)\n\n f.Close()\n rf.Close()\n\n return jsonify(d)", "def keys(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Expression]:", "def key_attributes(self):\n\n return [level.key for level in self.levels]", "def all_keys(self):\n return self.derivable_keys() + self.loadable_keys()", "def keys(self, installer_context):\n return self.spec.keys(self.data, installer_context)", "def get_dataset_names(ref_key, datasets):\n ds_dict = {}\n for ds in datasets.keys():\n ds_dict[ds] = datasets[ds]['columns']\n ds_names = get_result_names(ds_dict, ref_key, n=3)\n dataset_names = []\n for name in ds_names[0]:\n dataset_names.append(name[0])\n\n return dataset_names", "def reconcile_somef_data(data):\n out = {}\n for key, value in data.items():\n # for now, we are not exporting provenance keys. 
Ignore all keys like somef_provenance\n if \"somef\" not in key:\n # if the value is a list, preserve the list\n if isinstance(value, list) or isinstance(value, tuple):\n if len(value) > 0:\n if key == constants.CAT_LICENSE:\n # Create a License object with its id, URL, name and sameAs spdx identifier URL.\n # We don't keep the license content, as in a KG it may be too many file text\n license_result = {}\n for l in data[key]:\n if constants.PROP_SPDX_ID in l[constants.PROP_RESULT].keys():\n license_result[constants.PROP_SPDX_ID] = l[constants.PROP_RESULT][\n constants.PROP_SPDX_ID]\n if constants.PROP_NAME in l[constants.PROP_RESULT].keys():\n license_result[constants.PROP_NAME] = l[constants.PROP_RESULT][constants.PROP_NAME]\n if constants.PROP_URL not in license_result.keys() and constants.PROP_URL in \\\n l[constants.PROP_RESULT].keys():\n license_result[constants.PROP_URL] = l[constants.PROP_RESULT][constants.PROP_URL]\n # We get the first license we find from the repo\n elif l[constants.PROP_TECHNIQUE] == constants.TECHNIQUE_FILE_EXPLORATION \\\n and constants.PROP_SOURCE in l.keys() and \"api.github.com\" in \\\n license_result[constants.PROP_URL]:\n license_result[constants.PROP_URL] = l[constants.PROP_SOURCE]\n out[\"license\"] = license_result\n elif key in [constants.CAT_DOWNLOAD, constants.CAT_USAGE, constants.CAT_INSTALLATION]:\n # if there are multiple excerpts in separate sub-headers we concatenate them\n aggregated_value = \"\"\n other_results = []\n for result in data[key]:\n if result[constants.PROP_TECHNIQUE] == constants.TECHNIQUE_HEADER_ANALYSIS:\n # Note: this could be improved by adding as many '#' as parent headers\n aggregated_value += \"##\" + result[constants.PROP_RESULT][\n constants.PROP_ORIGINAL_HEADER] + \"\\n\"\n aggregated_value += result[constants.PROP_RESULT][constants.PROP_VALUE]\n else:\n other_results.append(result[constants.PROP_RESULT][constants.PROP_VALUE])\n # if there are file dumps like install.md, they are separate values for the property\n other_results.append(aggregated_value)\n out[key] = other_results\n\n elif key == constants.CAT_CITATION:\n # from each publication, we take its DOI or URL (if available from the previous extraction)\n # Note: This is a point of improvement to have a proper Publication object.\n citation_urls = []\n for cite in data[key]:\n result = cite[constants.PROP_RESULT]\n if constants.PROP_DOI in result.keys():\n citation_urls.append(result[constants.PROP_DOI])\n elif constants.PROP_URL in result.keys():\n citation_urls.append(result[constants.PROP_URL])\n if len(citation_urls) > 0:\n # remove duplicates\n citation_urls = list(set(citation_urls))\n out[key] = citation_urls\n elif key == constants.CAT_DOCUMENTATION:\n # we only keep links\n doc_links = [obj[constants.PROP_RESULT][constants.PROP_VALUE] for obj in value if\n obj[constants.PROP_RESULT][constants.PROP_TYPE] == constants.URL]\n if len(doc_links) > 0:\n out[key] = doc_links\n elif key == constants.CAT_OWNER:\n out[key] = value[0][constants.PROP_RESULT]\n elif key == constants.CAT_RELEASES:\n # we keep the full object\n out[key] = [obj[constants.PROP_RESULT] for obj in value]\n # we add a special property (hack) for making the mapping work\n out[constants.AUX_RELEASES_IDS] = [obj[constants.PROP_RESULT][constants.PROP_RELEASE_ID] for obj in value]\n else:\n try:\n if len(value) == 1:\n # remove list for easing mapping\n out[key] = value[0][constants.PROP_RESULT][constants.PROP_VALUE]\n else:\n out[key] = [obj[constants.PROP_RESULT][constants.PROP_VALUE] for obj 
in value]\n except:\n logging.warning(\"Error when converting field in RDF: \" + key)\n # if it is not a list, just get the excerpt\n else:\n out[key] = value[constants.PROP_RESULT][constants.PROP_VALUE]\n print(out)\n return out", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def iterkeys(self):\r\n for wr in self.data.iterkeys():\r\n obj = wr()\r\n if obj is not None:\r\n yield obj", "def keys(targets):", "def assemble_changes(self, keys, resolve_keys, sql_state):\n result_keys = []\n all_keys = keys | resolve_keys\n for key in all_keys:\n node = sql_state.node_map[key]\n sql_item = sql_state.nodes[key]\n ancs = node.ancestors()[:-1]\n ancs.reverse()\n pos = next((i for i, k in enumerate(result_keys) if k in ancs), len(result_keys))\n result_keys.insert(pos, key)\n\n if key in resolve_keys and not sql_item.replace:\n # ancestors() and descendants() include key itself, need to cut it out.\n descs = reversed(node.descendants()[:-1])\n for desc in descs:\n if desc not in all_keys and desc not in result_keys:\n result_keys.insert(pos, desc)\n # these items added may also need reverse operations.\n resolve_keys.add(desc)\n return result_keys", "def props(kls):\n from sqlalchemy.orm.properties import RelationshipProperty\n return [x.key for x in kls.__mapper__.iterate_properties if type(x) != RelationshipProperty]", "def step_impl(context, key, parent):\n collected_entries = set()\n print('Collected entries:')\n for row in context.table:\n field_value = row[key]\n # Some fields may be a list of values\n if isinstance(field_value, list):\n for item in field_value:\n print(' ', item)\n collected_entries.add(item)\n else: # assume a simple scalar\n print(' ', field_value)\n collected_entries.add(field_value)\n\n print('Tested entries:')\n tested_entries = set()\n for entry in context.response_json:\n field_value = entry.get(parent).get(key)\n if isinstance(field_value, list):\n for item in field_value:\n tested_entries.add(item)\n else: # assume a simple scalar\n tested_entries.add(field_value)\n\n for item in tested_entries:\n print(' ', item)\n assert item in collected_entries", "def all(self):\r\n return self.attr_dict.keys()", "def iterkeys(self):\r\n return self.data.iterkeys()", "def print_keys_existing(self):\n\t\tfor key in self.cache:\n\t\t\tprint(key)", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def valuerefs(self):\r\n return self.data.values()", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def find_dependencies(root):\n \n symbol_table = create_symbol_table(root)\n\n names = []\n #Set the depth of the root node\n set_depth(root, 0)\n #Stack of nodes to visit\n stack = Stack(root)\n \n #List of (src, dest) of dependencies\n dependency_table = DTable(symbol_table=symbol_table)\n\n for node, children, ntype in stack:\n \n stack.check_and_push_scope()\n\n #A Name is being loaded, therefore \n if ntype == \"Name\" and is_load(children):\n \"\"\"\n \"\"\"\n dependency_table.append( (stack.scopes, node))\n \n elif ntype == \"Assign\":\n #TODO need to add assignments and then revoke them\n #for child in children:\n #print children\n pass\n\n \n elif ntype == \"Attribute\":\n #TODO: attribute chains can be arbitrarily long\n #dep_dest = \"{}.{}\".format(node.value.id, node.attr)\n #print \"{} => {}\".format(scopes_to_str(scopes), dep_dest)\n\n #TODO: Can't just do dependency_table.append( (scopes, node))\n #since the unique_id function won't 
match the create the dep string like \n #{node.value.id}.{node.attr}.\n #Either generalize unique_id or something else.\n \n #Don't add children\n continue\n \n set_lineno(node, children)\n #Add children to stack\n #This musn't always be performed\n for child in children[::-1]:\n set_depth(child, node.depth + 1)\n stack.append(child)\n\n print \"dependency table is \"\n print dependency_table", "def get_required_mods(self):\r\n mods = []\r\n unknowntags = []\r\n for key, value in self.dependencies.items():\r\n if value.required_by:\r\n if value.provided_by:\r\n mods.append(list(value.provided_by)[0]) #Pick random'ish if more than one.\r\n else:\r\n unknowntags.append((key, value))\r\n return {\"mods\":sorted(mods, key= lambda x: x.mod.name), \"unknown\": unknowntags}", "def step_impl(context, key):\n print('Collected entries:')\n collected_entries = _aggregate_collected_entries(context, key)\n\n print('Tested entries:')\n for row in context.table:\n value = row[key]\n print(' ', value)\n assert value in collected_entries", "def step_impl(context, key):\n print('Collected entries:')\n collected_entries = _aggregate_collected_entries(context, key)\n\n print('Tested entries:')\n for row in context.table:\n value = row[key]\n print(' ', value)\n assert value in collected_entries", "def keys(self) -> List:\n pass", "def keys(self):\n for db, metas in self.task_metas.items():\n for task_name in metas.keys():\n yield f'{db}/{task_name}'", "def filter(self, keys, lst=None, func=\"all\"):\n f = all if func == \"all\" else any\n\n if lst is None:\n lst = self\n if DEP in lst[0] and INDEP in lst[0]:\n filt_dep = True\n else:\n filt_dep = False\n\n def filt_func(d):\n if filt_dep:\n return f([k in d[INDEP] or k in d[DEP] for k in listify(keys)])\n else:\n return f([k in d for k in listify(keys)])\n\n return filter(filt_func, lst)", "def chart_data(work_list, key):\n fields, values = [], []\n for obj in work_list:\n if obj[key] not in fields:\n bisect.insort_left(fields, obj[key])\n index = fields.index(obj[key])\n values.insert(index, 1)\n else:\n index = fields.index(obj[key])\n values[index] += 1\n return [fields, values]", "def learn(primer, dependencies):\n knowledge_map = defaultdict(dict)\n for row in primer:\n for dvcol, ivcol in dependencies.items():\n # knowledge of the dependent value is mapped to the value\n # of the independent value col\n #\n # notice:\n # - if the knowledge_map has no entry for the dv col,\n # a dict is constructed automatically\n # - the value of the iv col is used\n # - overwrites the previous known relationship\n knowledge_map[dvcol][row[ivcol]] = row[dvcol]\n return knowledge_map", "def GetSubkeys(self):", "def displayDependencies(self, iter):\n return (signal_base_display_dependencies(self.obj, iter))", "def keys(self):\n # This function implements the heart of the table logic. We manually\n # combine keys using the operators indicated by FactorExprNode objects.\n # The reason this method is part of DataCube and not implemented in\n # FactorExprNode is because the latter is meant to be a purely\n # declarative representation of the expression graph, not an imperative\n # utility class.\n \n if self._expr is None:\n # We are being asked for the keys of some raw underlying data.\n # If this is a numpy array, just return integer indices. 
If this\n # is a Pandas DataFrame, we can return its index.\n if isinstance(self._data, np.ndarray):\n return range(self._data.shape[0])\n elif hasattr(self._data, \"index\"):\n return self._data.index\n else:\n # TODO: For now, all faceting operations require that we have a \n # pandas DataFrame to compute the factors/levels. Integrate Bryan's\n # Level DType stuff to make this support regular ndarrays.\n if not hasattr(self._data, \"groupby\"):\n raise RuntimeError(\"Data needs to support group-by functionality\")\n op = self._expr.op\n if op is None:\n # Do a simple group-by\n return self._data.groupby(self._expr.factors)\n\n elif op == \"blend\":\n # Set union of keys from the two operands of the blend\n left, right = self._expr.factors\n keys = set.union(set(self._data.groupby(left).keys()), set(self._data.groupby(right).keys()))\n return keys\n \n elif op == \"cross\":\n return [self._data.groupby(left).keys(), self._data.groupby(right).keys()]\n\n elif op == \"nest\":\n # Nested group-by. In effect like a cross that rejects null\n # intersections between its input dimensions.\n # We need to loop over the keyspace of our left operand and\n # then do a group-by according to the factor named in the\n # right operand. If the left operand is a cross, then we\n # need to do the outer product, and we return two lists.\n # TODO\n raise NotImplementedError", "def _deriv_keys(self, key):\n prom2abs = self._prom2abs\n abs2prom = self._abs2prom\n\n DERIV_KEY_SEP = self._DERIV_KEY_SEP\n\n # derivative could be tuple or string, using absolute or promoted names\n if isinstance(key, tuple):\n of, wrt = key\n else:\n of, wrt = key.split(DERIV_KEY_SEP)\n\n # if promoted, will map to all connected absolute names\n abs_of = [of] if of in abs2prom else prom2abs[of]\n if wrt in prom2abs:\n abs_wrt = [prom2abs[wrt]][0]\n else:\n abs_wrt = [wrt]\n\n abs_keys = ['%s%s%s' % (o, DERIV_KEY_SEP, w) for o, w in itertools.product(abs_of, abs_wrt)]\n\n prom_of = of if of in prom2abs else abs2prom[of]\n if wrt in abs2prom:\n prom_wrt = abs2prom[wrt]\n else:\n prom_wrt = wrt\n\n prom_key = (prom_of, prom_wrt)\n\n return abs_keys, prom_key", "def keys(self):\n\t\treturn tuple(self.dist.keys())", "def getDependenciesCharts(self) -> Mapping[str, 'ChartVersionInfo']:\n deps = self.getDependenciesList()\n ret: Dict[str, 'ChartVersionInfo'] = {}\n for dep in deps:\n ret[dep['name']] = self.getDependencyChart(dep['name'])\n return ret", "def test_serialize_data_with_all_deps(self):\n\n gb = GraphBuilder()\n libfwk = gb.add_lib32(PT_SYSTEM, 'libfwk')\n libvndk = gb.add_lib32(PT_SYSTEM, 'libvndk',\n dt_needed=['libvnd_bad.so'], extra_dir='vndk')\n libvndk_sp = gb.add_lib32(PT_SYSTEM, 'libutils',\n dt_needed=['libvnd_bad.so'],\n extra_dir='vndk-sp')\n libvnd = gb.add_lib32(PT_VENDOR, 'libvnd',\n dt_needed=['libvndk.so', 'libutils.so'])\n libvnd_bad = gb.add_lib32(PT_VENDOR, 'libvnd_bad', extra_dir='vndk-sp')\n gb.resolve()\n\n with patch('sys.stderr', StringIO()):\n vndk_sets = gb.graph.compute_degenerated_vndk(set(), None)\n\n self.assertNotIn(libvnd_bad, libvndk.deps_good)\n self.assertNotIn(libvnd_bad, libvndk_sp.deps_good)\n\n strs, mods = DepsInsightCommand.serialize_data(\n list(gb.graph.all_libs()), vndk_sets, ModuleInfo())\n\n deps = self._get_module_deps(strs, mods, libvndk.path)\n self.assertIn(libvnd_bad.path, deps)\n\n deps = self._get_module_deps(strs, mods, libvndk_sp.path)\n self.assertIn(libvnd_bad.path, deps)\n\n users = self._get_module_users(strs, mods, libvnd_bad.path)\n self.assertIn(libvndk.path, 
users)\n self.assertIn(libvndk_sp.path, users)", "def set_dependencies(self,dependency_list):\n\t\tdeps = {}\n\t\tfor relation in dependency_list:\n\t\t\tself.nr_of_deps += 1\n\t\t\t# Find the type of relation\n\t\t\trel = re.match('[a-z\\_]*(?=\\()',relation).group(0)\n\t\t\t# Find head and dependent\n\t\t\thead = int(re.search('(?<=-)[0-9]*(?=, )',relation).group(0))\n\t\t\tdep = int(re.search('(?<=-)[0-9]*(?=\\)$)', relation).group(0))\n\t\t\t# Set head position and create\n\t\t\t#dictinary entries\n\t\t\tif head == 0:\n\t\t\t\tself.head_pos = dep\n\t\t\telse:\n\t\t\t\tdeps[head] = deps.get(head,[])\n\t\t\t\tdeps[head].append([dep,rel])\n\t\t#set headpos to first head in dependency list if sentence has no head\n\t\tif dependency_list and not self.head_pos:\n\t\t\tfirst_head = int(re.search('(?<=-)[0-9]*(?=, )',dependency_list[0]).group(0))\n\t\t\tself.head_pos = first_head\n\t\treturn deps", "def data(self, *keys: _TResultKey) -> t.List[t.Dict[str, t.Any]]:\n return [record.data(*keys) for record in self]", "def derivable_keys(self):\n res = []\n for cl in type(self).__mro__:\n if cl in self._derived_quantity_registry:\n res += list(self._derived_quantity_registry[cl].keys())\n return res", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def AllKeys(self) -> _n_0_t_1[str]:" ]
[ "0.6234829", "0.60996187", "0.56604636", "0.56510067", "0.55388093", "0.55388093", "0.5499287", "0.54292846", "0.5428409", "0.5426958", "0.54142815", "0.53891647", "0.5372534", "0.5372534", "0.5371351", "0.53623796", "0.53498656", "0.5321484", "0.5321484", "0.5317146", "0.5309717", "0.5309717", "0.5300489", "0.52988786", "0.52704006", "0.5251809", "0.52489984", "0.52438956", "0.5209912", "0.5195884", "0.5190358", "0.51567984", "0.51168597", "0.50960594", "0.50891596", "0.5086395", "0.5065056", "0.5060953", "0.50494224", "0.5043923", "0.5026994", "0.5016816", "0.5001975", "0.4995903", "0.49951988", "0.49881303", "0.49798417", "0.49790475", "0.4977265", "0.4972429", "0.49643543", "0.4964009", "0.49615222", "0.4960416", "0.4960019", "0.49570847", "0.4955711", "0.49533328", "0.49505872", "0.49505764", "0.49463433", "0.4945465", "0.4945435", "0.49423778", "0.49362582", "0.49348503", "0.49316397", "0.49307138", "0.49307138", "0.49177048", "0.49173945", "0.4910763", "0.49032247", "0.4895468", "0.4891154", "0.4890868", "0.48858726", "0.4881472", "0.48813513", "0.48669687", "0.48650408", "0.48621657", "0.48621657", "0.48608625", "0.48605654", "0.4860523", "0.48589435", "0.48587137", "0.48538536", "0.48524266", "0.48505124", "0.48501128", "0.48459804", "0.48455375", "0.48371103", "0.48370472", "0.48355305", "0.48341623", "0.48225623", "0.4819978" ]
0.49673614
50
Return lists of values grouped by other independent variables
def grouped(self, keys, labels="dict", as_dicts=False):
        # TODO: This method seems unnecessarily complex
        combined = self.combined()
        filtered = self.filtered(keys, lst=combined)
        out = {}
        for dct in filtered:
            d_labels = dct[INDEP].copy()
            d = {}
            keys_copy = keys.copy()
            for k in keys:
                if k in d_labels:
                    d[k] = d_labels.pop(k)
                elif k in dct[DEP]:
                    d[k] = dct[DEP][k]
            hsh = hash_dict(d_labels)
            if hsh not in out:
                f_labels = dict_to_str(d_labels) if labels == "str" else d_labels
                out[hsh] = {"labels": f_labels, "values": {k: [] for k in keys_copy}}
            d_values = out[hsh]["values"]
            for k in keys_copy:
                d_values[k].append(d[k])
        if as_dicts:
            return list(out.values())
        else:
            lst = []
            for group in out.values():
                row = [v for v in group["values"].values()]
                row.append(group["labels"])
                lst.append(row)
            return lst
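The method above relies on helpers (combined, filtered, hash_dict, dict_to_str) and the INDEP/DEP constants that are not defined in this record, so its exact usage cannot be shown. The snippet below is only a minimal, self-contained sketch of the same grouping idea over plain-dict rows; the "indep"/"dep" field names, the sample rows, and group_by_remaining are illustrative assumptions, not part of the original API.

# Sketch only: assumed plain-dict rows standing in for the INDEP/DEP row
# structure used by grouped(); none of these names come from the source.
rows = [
    {"indep": {"T": 300, "run": 1}, "dep": {"p": 1.0}},
    {"indep": {"T": 300, "run": 2}, "dep": {"p": 1.1}},
    {"indep": {"T": 350, "run": 1}, "dep": {"p": 2.0}},
]

def group_by_remaining(rows, keys):
    # Collect the requested keys into lists, grouped by the values of the
    # remaining (non-requested) independent variables.
    groups = {}
    for row in rows:
        labels = {k: v for k, v in row["indep"].items() if k not in keys}
        values = {k: row["indep"].get(k, row["dep"].get(k)) for k in keys}
        bucket = groups.setdefault(
            tuple(sorted(labels.items())),
            {"labels": labels, "values": {k: [] for k in keys}},
        )
        for k in keys:
            bucket["values"][k].append(values[k])
    return list(groups.values())

print(group_by_remaining(rows, ["run", "p"]))
# [{'labels': {'T': 300}, 'values': {'run': [1, 2], 'p': [1.0, 1.1]}},
#  {'labels': {'T': 350}, 'values': {'run': [1], 'p': [2.0]}}]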
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_variable_groups(all_inputs):\n row_length = len(all_inputs[0])\n for single_input in all_inputs[1:]:\n if len(single_input) != row_length:\n raise ValueError(\n \"Please make sure the length is the same if you want to input multiple values when the type of variables is t_array or t_mapping\")\n\n final_groups = list()\n row_length = len(all_inputs[0])\n col_length = len(all_inputs)\n for i in range(1, row_length):\n temp_list = list()\n for j in range(col_length):\n temp_list.append((all_inputs[j][0], all_inputs[j][i]))\n final_groups.append(temp_list)\n return final_groups", "def values_grouped_by_attribute(self, attributes):\n values = []\n for attribute in attributes:\n _vals_in_attr = []\n for instance in self.data:\n if attribute.is_continuous():\n _vals_in_attr.append(float(instance.value(attribute)))\n else:\n _vals_in_attr.append(instance.value(attribute))\n values.append(_vals_in_attr)\n return values", "def remove_invariable_features_grouped(tX_grouped):\n\n new_tX_grouped = []\n for i in range(len(tX_grouped)):\n new_tX_grouped.append(remove_invariable_features(tX_grouped[i]))\n return new_tX_grouped", "def _get_subgroups(self):\n groups = [] # array of arrays\n for i in range(self.filter.shape[0]):\n for j in range(i):\n if self.filter[i][j]:\n if len(groups) < 1:\n groups.append([j, i])\n continue\n found = False\n for group_i, _ in enumerate(groups):\n if i in groups[group_i]:\n if j not in groups[group_i]:\n groups[group_i].append(j)\n found = True\n elif j in groups[group_i]:\n if i not in groups[group_i]:\n groups[group_i].append(i)\n found = True\n if not found:\n groups.append([i, j])\n return groups", "def grouped_vectors(self):\n # Group #\n grouped = self.simulated.groupby(self.group_cols)\n # Iterate #\n result = []\n for col_values, df in grouped:\n # Keep the current values of the group columns #\n current = dict(zip(self.group_cols, col_values))\n # Compute a discrete numpy vector #\n current[self.sum_col] = aggregator(df, self.sum_col, self.bin_col)\n # Make a series and append #\n result.append(pandas.Series(current))\n # Put all series into a data frame #\n result = pandas.DataFrame(result)\n # Return #\n return result", "def divisor_subgroups(self):\n return [Gamma0_constructor(M) for M in self.level().divisors()]", "def connex_components(self):\n unchecked = set(self.v.values())\n groups = []\n while len(unchecked):\n vcon = self.member_family(unchecked.pop())\n unchecked -= set(vcon)\n groups.append(set(vcon))\n return groups", "def marginals(self):\n all_variables = [None for ii in range(self.nvars)]\n for ii in range(self.nunique_vars):\n for jj in self.unique_variable_indices[ii]:\n all_variables[jj] = self.unique_variables[ii]\n return all_variables", "def connectedComponents(self):\n components = []\n X = set(self.X)\n while X:\n Xi = X.pop()\n if Xi.states <= 1: continue # don't include missing or assigned variables \n group = {Xi} # start a new group with this variable\n queue = [Xi] # do DFS on the graph from Xi to find its connected component:\n while queue:\n n = queue.pop()\n nbrs = self.markovBlanket(n) # get all connected variables\n nbrs.difference_update(group) # remove any we've already seen\n X.difference_update(nbrs) # remove new ones from unexplored variable list\n group.update(nbrs) # add them to this connected component\n queue.extend(nbrs) # and continue exploring from them in DFS order\n components.append(group)\n return components", "def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, 
Variable))", "def group_data(self):\n groups = []\n bug_map = self.bug_map()\n union_map = self.union_map(list(bug_map.keys()))\n # test_id grouping\n for union_id in set(union_map.values()):\n group = []\n for k, v in union_map.items():\n if v == union_id:\n group.extend(bug_map[k])\n if len(group) > 1:\n groups.append(group)\n return groups", "def groupByLabel( y ):\n index = []\n for i in np.unique(y): # pour toutes les classes\n ind, = np.where(y==i)\n index.append(ind)\n \n return index", "def split_in_groups_1(y, tX, ids, unwanted_value):\n\n unwanted_value_check = 1 * (tX == unwanted_value)\n masks, indices, counts = np.unique(unwanted_value_check, return_inverse=True, return_counts=True, axis=0)\n\n y_grouped, tX_grouped, ids_grouped = [], [], []\n for i in range(max(indices) + 1):\n condition = indices == i\n y_grouped, tX_grouped, ids_grouped = extract_from_dataset(y, tX, ids, condition, y_grouped, tX_grouped,\n ids_grouped)\n return np.asarray(y_grouped), np.array(tX_grouped, dtype=object), np.asarray(ids_grouped), masks, counts", "def getVar2FactorsMap(self):\r\n V = self.getAllNodes()\r\n return list(list(idx for idx,f in enumerate(self.factors) if i in f.var) for i in V)", "def values(self):\n return [ self[x] for x in self ]", "def get_sub_values(self):\n return list()", "def partition_by(inputs, attribute):\n groups = defaultdict(list)\n for input in inputs:\n key = input[0][attribute] # get the value of the specified attribute\n groups[key].append(input) # then add this input to the correct list\n return groups", "def movies(self):\n return self.data.groupby('Parameters')", "def varlist(data):\n \n Bxm=data[\"Bx\"].mean()\n Bym=data[\"By\"].mean()\n Bzm=data[\"Bz\"].mean()\n Bxsqm=(data[\"Bx\"]**2).mean()\n Bysqm=(data[\"By\"]**2).mean()\n Bzsqm=(data[\"Bz\"]**2).mean()\n Bxym=(data[\"Bx\"]*data[\"By\"]).mean()\n Bxzm=(data[\"Bx\"]*data[\"Bz\"]).mean()\n Byzm=(data[\"By\"]*data[\"Bz\"]).mean()\n \n Varx= Bxsqm-Bxm**2\n Varxy=Bxym-Bxm*Bym\n Varxz=Bxzm-Bxm*Bzm\n Vary=Bysqm-Bym**2\n Varyz=Byzm-Bym*Bzm\n Varz=Bzsqm-Bzm**2\n var=[Varx,Varxy, Varxz,Varxy,Vary,Varyz,Varxz,Varyz,Varz]\n var=np.array(var)\n var=var.reshape((3,3))\n return var", "def data_grouping(self):\n group_container, film_container, plank_container = [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)]\n\n for i in self.data_labels:\n group = int(i[:-1])\n group_container[group - 1].append(i)\n film_container[group - 1].append(self.film_count[self.data_labels.index(i)])\n plank_container[group - 1].append(self.plank_count[self.data_labels.index(i)])\n\n return group_container, film_container, plank_container", "def partition_by(inputs, attribute):\n groups = defaultdict(list)\n for input in inputs:\n key = input[0][attribute]\n groups[key].append(input)\n return groups", "def values(self):\n return [p.value for p in self]", "def grouping(data,dis):\n cluRe = []\n for i in range(len(data)):\n cluRe.append(np.argsort(dis[i])[0])\n \n return np.asarray(cluRe)", "def pop_var_from_subpop_var(groups):\n return np.hstack(groups).var()", "def grouped_data(self):\n return self.data.groupby(\"Core\")", "def split_var(self, x):\n cum_dims = list(np.cumsum(self.dims))\n out = []\n for slice_from, slice_to, dist in zip([0] + cum_dims, cum_dims, self.dists):\n sliced = x[:, slice_from:slice_to]\n out.append(sliced)\n return out", "def one_in_each(self):\n form = []\n for group in range(0, len(self.groups)):\n f = And(*[\n OneHot(*[ 
self.X[group, idx, root]\n for idx in range(0, self.items_per) ])\n for root in range(0, self.items_per) ])\n form.append(f.to_dnf())\n return form", "def variables(self):\n return [term.variable for term in self.terms]", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def group_by_cat(y, x, ids):\n NB_CATEGORY = 4 \n # Column index of the categorical feature\n IDX_COL_CAT = np.where((x.max(axis=0) - x.min(axis=0)) == 3)[0][0]\n Y = []\n X = []\n IDS = []\n for i in range(NB_CATEGORY):\n row_idx = np.where(x[:, IDX_COL_CAT] == i)[0] # index of the rows in category i\n x_cat = np.delete(x[row_idx], IDX_COL_CAT, axis=1) # Remove category feature\n Y.append(y[row_idx])\n X.append(x_cat) \n IDS.append(ids[row_idx])\n return Y, X, IDS", "def group_by(array: Iterable[T], key: Callable[[T], Union[str, int, float]]=None, preserve_order: bool=False) -> List[List[T]]:\n if preserve_order:\n values: Dict[Union[str, int, float], List[T]] = OrderedDict()\n else:\n values: Dict[Union[str, int, float], List[T]] = {}\n if key is None:\n key = identity\n\n for v in array:\n v_key = key(v)\n if v_key not in values:\n values[v_key] = []\n values[v_key].append(v)\n\n return list(values.values())", "def values(self):\n return [i.value for i in self.value]", "def get_vars(self):\n return [self.mu, self.var]", "def _get_groups(X, y):\n if SK18:\n X, y = _indexable(X, y)\n return X, y, None", "def var(\n self, values: pdarray, skipna: bool = True, ddof: int_scalars = 1\n ) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"var\", skipna, ddof)\n return k, cast(pdarray, v)", "def independent_variables(self):\n return self._independent_variables", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def values(self):\r\n return [self[k] for k in self]", "def get_groups(array):\n groups = []\n for key, group in groupby(array):\n i = sum(1 for _ in group)\n groups.append((int(key), i))\n return groups", "def get_visits(xvars):\n is_timespace = len(list(xvars)[0]) == 3\n if is_timespace:\n visits = [var[0] for var in xvars.iteritems() if var[1].X == 1]\n return sorted(visits, key=lambda visit: visit[2])\n\n n = max(xvars.keys())[0] + 1\n visits = []\n for i in range(n):\n for j in range(n):\n if not i == j and xvars[i, j].X == 1:\n visits.append((i, j))\n return sorted(visits, key=lambda visit: visit[0])", "def split_phases(df):\n return(\n tuple([df.groupby('Phase').get_group(p) for p in df.Phase.unique()])\n )", "def obtain_groups(input_results, input_vertices):\n\tbest_first, best_second = [], []\n\tfor i in xrange(len(input_results)):\n\t\tfirst_group, second_group = best_group(input_results[i].solution, input_vertices)\n\t\tbest_first.append(first_group)\n\t\tbest_second.append(second_group)\n\n\tcomplete_first, complete_second = drop_groups(best_first, best_second)\n\n\treturn complete_first[0], complete_second[0]", "def additional_splitting(y_grouped, tX_grouped, ids_grouped, unwanted_value):\n y_grouped_new, tX_grouped_new, ids_grouped_new, masks_new, counts_new = [], [], [], [], []\n for i in range(len(tX_grouped)):\n y, tX, ids, masks, counts = split_in_groups_1(y_grouped[i], tX_grouped[i], ids_grouped[i], unwanted_value)\n for j in range(len(tX)):\n 
y_grouped_new.append(y[j])\n tX_grouped_new.append(tX[j])\n ids_grouped_new.append(ids[j])\n masks_new.append(masks[j])\n counts_new.append(counts[j])\n return y_grouped_new, tX_grouped_new, ids_grouped_new, masks_new, counts_new", "def aggregate(predictions, aggfunc):\n return [aggfunc(sublist) for sublist in np.transpose(predictions)]", "def group(self):\n instances = self.instances\n groups = []\n for i in range(len(self.labels)):\n groups.append([instance for instance in instances if instance[-1] == self.labels[i]])\n return groups", "def _find_Vgroups(self, X):\n na_value = X[self.V_features].isnull().sum()\n na_list = na_value.unique()\n na_value = na_value.to_dict()\n cols_same_null = []\n for i in range(len(na_list)):\n cols_same_null.append([k for k, v in na_value.items() if v == na_list[i]])\n return cols_same_null", "def group_by(y):\n\n y_dict = {}\n for i, value in enumerate(y):\n try:\n y_dict[value]\n #Value in dictionary\n y_dict[value].append(i)\n except:\n #Value not in dictionary\n y_dict[value] = [i]\n\n return y_dict", "def group(self):\n return self._groupby().agg(self.formula_name)", "def __aggregate(self, series):\n if series.name in self.__non_redundant_entity_attributes or series.name in self.__redundant_entity_attributes: # Textual entities\n merged_sensitive_terms = list()\n for sensitive_terms in series.dropna():\n merged_sensitive_terms = merged_sensitive_terms + sensitive_terms\n return merged_sensitive_terms if len(merged_sensitive_terms) > 0 else None # Return merged result, or None\n else:\n if series.nunique() > 1: # Since there are more values, pack them into a list / frozenset\n if series.name in self.__textual_attributes or series.name in self.__config.get_insensitive_attributes():\n return list(series.array)\n else:\n return frozenset(series.array)\n else:\n return series.unique()[0] # Else return just this single value", "def making_x_val_data_list_for_kfold(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n val_data_list = making_dataset_list_val(data, split_num)\n x_val_data_list = making_dataset_list_x(val_data_list)\n return translate_pandas_to_numpy(x_val_data_list)", "def grouper(groupset):\n\n return list(combinations(groupset, 2))", "def extract_from_dataset(y, tX, ids, condition, y_grouped, tX_grouped, ids_grouped):\n\n y_grouped.append(np.extract(condition, y))\n ids_grouped.append(np.extract(condition, ids))\n\n indices_to_take = np.extract(condition, range(len(tX)))\n tX_grouped.append(np.take(tX, indices_to_take, axis=0))\n\n return y_grouped, tX_grouped, ids_grouped", "def get_values(self):\n return map(lambda x: x.value(),self)", "def data_for_grouping() -> NoReturn:\n raise NotImplementedError", "def data_for_grouping() -> NoReturn:\n raise NotImplementedError", "def get_variables(self):\n\n self._enforce_coupling()\n\n dv = []\n for scenario in self.scenarios:\n if scenario.group_master:\n dv.extend(scenario.active_variables())\n else:\n dv.extend(scenario.uncoupled_variables())\n\n for body in self.bodies:\n if body.group_master:\n dv.extend(body.active_variables())\n else:\n dv.extend(body.uncoupled_variables())\n\n return dv", "def split(self, by, return_dict=True):\n groups = np.unique(by)\n ix = [np.where(by == groups[i])[0] for i in range(len(groups))]\n if return_dict:\n output = {k:self.subset(i) for k,i in zip(groups, ix)}\n else:\n output = [self.subset(i) for i in ix]\n return output", "def get_groups(self):\n return [self.primary_group] + list(self.secondary_groups)", "def get_data(self):\n\n if 
self.has_group_cols():\n data = []\n g_cols = self.get_group_names()\n for (_, targets) in self.targets:\n data.append(self.get_series(targets))\n else:\n data = [self.get_series(self.targets)]\n\n return ListDataset(data, freq=self.freq)", "def _transform(self, df: pd.DataFrame) -> Tuple[Dict, Dict]:\n determinant = self.lhs.eval(df=df)\n dependent = self.rhs.eval(df=df)\n groups = dict()\n meta = dict()\n for index, values in enumerate(zip(determinant, dependent)):\n value = values[0] # determinant value: keys\n if isinstance(value, list):\n value = tuple(value)\n if value not in groups:\n groups[value] = list()\n meta[value] = Counter()\n groups[value].append(index)\n\n meta_value = values[1] # dependent value: meta\n counter = Counter([tuple(meta_value.tolist())]) if isinstance(meta_value, pd.Series) else Counter([meta_value])\n meta[value] += counter\n\n return groups, meta", "def splittable_variables(self) -> List[int]:\n #print(\"enter bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n \n for i in range(0, self._n_features):\n if self._splittable_variables[i] is None:\n self._splittable_variables[i] = is_not_constant(self.get_column(i))\n \n output = [i for (i, x) in enumerate(self._splittable_variables) if x is True] \n #print(\"-exit bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n return output", "def get_possible_values(self):\n possible_values = {}\n for f in self.__features:\n possible_values[f] = list(self.__data[f].unique())\n return possible_values", "def values(self):\n # TODO: Collect all values in each of the buckets\n all_values = [] # Will store all the key\n\n for bucket in self.buckets:\n for value in bucket:\n if value is not None:\n all_values.append(value[1])\n return all_values", "def get_variables(self):\n return [self.variables[key] for key in sorted(self.variables)]", "def get_unique_categorical(series: pd.Series) -> list:\n\n return list(series.unique())", "def making_y_val_data_list_for_kfold(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n val_data_list = making_dataset_list_val(data, split_num)\n y_val_data_list = making_dataset_list_y(val_data_list)\n return translate_pandas_to_numpy(y_val_data_list)", "def get_item_groups(dataset):\n return dataset.groupby(\"name\", as_index=False, sort=False).groups", "def groupbylist(\n iterable: Iterable[_T],\n key: 'Callable[[_T], Any] | None' = None\n) -> list[tuple[Any, list[_T]]]:\n return [(k, list(g)) for k, g in groupby(iterable, key=key)]", "def labeled_dicoms(self):\n return [sorted(self.data)[i-1][1:] for i in self.labeled]", "def data_for_grouping(allow_in_pandas):\n a, b, c = (1,), (2,), (3,)\n return PandasArray(np.array(\n [b, b, np.nan, np.nan, a, a, b, c]\n ))", "def state(self) -> pd.Series:\n return pd.Series(\n (stat.get() for stat in self._groups.values()),\n index=(\n pd.Index(key[0] for key in self._groups.keys())\n if self.by and len(self.by) == 1\n else pd.MultiIndex.from_tuples(self._groups.keys(), names=self.by)\n ),\n name=self._feature_name,\n )", "def get_shapley_values(model_outcomes):\n # Get superset $N$\n N = sorted(model_outcomes.keys(), key=lambda x: -len(x))[0]\n\n # Initialize results container\n shapley_values = {}\n\n # Iterate through all features then compute their Shapley values\n for feature_name in N:\n shapley_values[feature_name] = compute_shapley_value(\n model_outcomes, feature_name)\n\n return shapley_values", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def _get_param_groups(self, 
model: nn.Module) -> List[Dict[str, Any]]:\n param_groups = [\n {\"params\": [], \"weight_decay\": 0.1, \"lr\": 1e-2},\n {\"params\": [], \"weight_decay\": 0.01, \"lr\": 1e-3},\n {\"params\": []},\n ]\n for param_name, param in model.named_parameters():\n if \"weight\" in param_name:\n param_groups[0][\"params\"].append(param)\n elif \"bias\" in param_name:\n param_groups[1][\"params\"].append(param)\n else:\n param_groups[2][\"params\"].append(param)\n return param_groups", "def groupby(self):\n try:\n return plist([x.groupby() for x in self])\n except Exception:\n groups = collections.OrderedDict()\n for i, x in enumerate(self):\n if x not in groups:\n groups[x] = plist()\n groups[x].append(self.__root__[i])\n return plist(groups.values())", "def get_model_values(self, i, df):\n if i == 0:\n df.participation = np.tile(self.participation, (61, 1)).transpose()\n abatement = self.abatement(df.gross_output[i], df.miu[i],\n df.backstop_growth[i],\n df.participation[i])\n damages = self.damages(df.gross_output[i],\n df.temp_atmosphere[i], abatement)\n output = self.output(df.gross_output[i], damages, abatement,\n df.temp_atmosphere[i])\n output_abate = self.output_abate(abatement, df.gross_output[i])\n return [abatement, damages, output, output_abate]", "def group_list(self, array):\n\n if array.ndim == 1:\n return [np.array(array[self.row_indices[k]])\n for k in self.group_labels]\n else:\n return [np.array(array[self.row_indices[k], :])\n for k in self.group_labels]", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def consecutive_values(vector: List[T], test: Callable[[T], bool]) -> List[List[T]]:\n\n groups = []\n current_group = []\n\n for value in vector:\n if test(value):\n current_group.append(value)\n else:\n if current_group:\n groups.append(current_group)\n current_group = []\n\n if current_group:\n groups.append(current_group)\n\n return groups", "def simplify(self):\n repeat = False\n simp = []\n import itertools\n for obj, agroup in itertools.groupby(list(self), lambda x: x[0]):\n xs = list(agroup)\n if len(xs) > 1:\n repeat = True\n n = sum([x[1] for x in xs])\n if n != 0:\n simp.append((obj, n))\n self.__x[0:] = simp\n if repeat:\n self.simplify()", "def get_var_group(self, variable):\n return self.dataset[variable].group()", "def variables(self):\n return np.array(list(self._match_result_dict.keys()))", "def _get_list(value_attribute, label_attribute):\n servers = get_servers()\n ret = []\n already_handled = []\n\n for server in servers:\n value = value_attribute(server)\n label = label_attribute(server)\n\n if value and value not in already_handled:\n ret.append({\n 'value': value,\n 'label': label\n })\n\n already_handled.append(value)\n\n return sorted(ret, key=lambda k: k['label'])", "def getInstances(X_train,X_test,x_values,y_values):\n\n newTrain = []\n yTrain = []\n for i, train in enumerate(X_train): #loop through each train\n newTrain.append([])\n yTrain.append([])\n for index in train: #loop through each index\n newTrain[i].append(x_values[index])\n yTrain[i].append(y_values[index])\n\n newTest = []\n yTest = []\n for i, test in enumerate(X_test): #loop through each train\n newTest.append([])\n yTest.append([])\n for index in test: #loop through each index\n newTest[i].append(x_values[index])\n yTest[i].append(y_values[index])\n\n return newTrain, newTest, yTrain, yTest", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def age_group_mixing():\n p = []\n for j in 
np.linspace(1,5,5):\n for k in np.linspace(1,5,5):\n if j == k:\n p.append(1)\n else:\n p.append(0.2**np.abs(j+1-k))\n p /= sum(p)\n return p", "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def serendipity_set_1d(\n dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[ScalarFunction]:\n basis: typing.List[ScalarFunction] = []\n for s in range(order + 1, order + dim + 1):\n for i in serendipity_indices(s, s - order, dim):\n p = 1\n for j, k in zip(variables, i):\n p *= j ** k\n basis.append(ScalarFunction(p))\n return basis", "def statistify(criteria):\n final = []\n for degree in criteria.keys():\n if degree == 'total':\n continue\n for num in range(0,criteria[degree]):\n final.append(int(degree.split('degree')[1]))\n return final", "def outcome_bygroup_df(df, outcomes, groupbyvars):\n colselect = groupbyvars + outcomes\n colnames=[]\n bygender = df.loc[:, colselect].groupby('group_gender')[outcomes].mean().T\n colnames.extend(list(bygender.columns))\n bymigrant = df.loc[:, colselect].groupby('group_migrant')[\n outcomes].mean().T\n colnames.extend(list(bymigrant.columns))\n byinformal = df.loc[:, colselect].groupby('group_informal')[\n outcomes].mean().T\n colnames.extend(list(byinformal.columns))\n bytotal = df.loc[:, colselect].groupby('Total')[outcomes].mean().T\n colnames.extend(list(bytotal.columns))\n data = pd.concat([bygender, bymigrant, byinformal,\n bytotal], axis=1, ignore_index=True)\n \n data.columns=colnames\n data['label'] = varlabel_df.loc[outcomes]\n data = data.set_index('label')\n return data", "def outcome_bygroup_df(df, outcomes, groupbyvars):\n colselect = groupbyvars + outcomes\n colnames=[]\n bygender = df.loc[:, colselect].groupby('group_gender')[outcomes].mean().T\n colnames.extend(list(bygender.columns))\n bymigrant = df.loc[:, colselect].groupby('group_migrant')[\n outcomes].mean().T\n colnames.extend(list(bymigrant.columns))\n byinformal = df.loc[:, colselect].groupby('group_informal')[\n outcomes].mean().T\n colnames.extend(list(byinformal.columns))\n bytotal = df.loc[:, colselect].groupby('Total')[outcomes].mean().T\n colnames.extend(list(bytotal.columns))\n data = pd.concat([bygender, bymigrant, byinformal,\n bytotal], axis=1, ignore_index=True)\n \n data.columns=colnames\n data['label'] = varlabel_df.loc[outcomes]\n data = data.set_index('label')\n return data", "def getVariables(self):\n statVars = [self[vn] for vn in self.statVars]\n timeVars = [self[vn] for vn in self.timeVars]\n return statVars + timeVars", "def summaries(self):\n return [ self.__class__(sets=[x]) for x in self.sets ]", "def valuerefs(self):\r\n return self.data.values()", "def potential_splits(self, potential_xj):\r\n \r\n self.cur.execute(\"SELECT DISTINCT \" + potential_xj + \" FROM \" + self.table_name + \";\")\r\n potential_splits = [ii[0] for ii in self.cur.fetchall()]\r\n return potential_splits", "def separate_by_list(self, criterium, reshape=False):\n\n separated_seqs = {}\n\n for s in self.unstructured():\n key = criterium(s)\n if key in separated_seqs:\n separated_seqs[key].append(s)\n else:\n separated_seqs[key] = [s]\n\n for key, seqs in separated_seqs.items():\n if reshape:\n separated_seqs[key] = DataArray(separated_seqs[key]).reshape(self.shape)\n else:\n separated_seqs[key] = DataArray(separated_seqs[key])\n\n return separated_seqs", "def get_possible_labels(Y):\n \n return list(set(itertools.chain(*Y)))", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]" ]
[ "0.6295473", "0.6200967", "0.57439184", "0.5688238", "0.56285703", "0.55854857", "0.55849904", "0.5516372", "0.5502717", "0.5489601", "0.5482229", "0.5476639", "0.54517305", "0.545083", "0.54401696", "0.54284453", "0.5395837", "0.539547", "0.53786176", "0.5318466", "0.53179634", "0.5287798", "0.52811766", "0.5278392", "0.5273344", "0.5272549", "0.52574104", "0.52561074", "0.52514505", "0.5249484", "0.5242955", "0.52296925", "0.52243555", "0.5206279", "0.51904345", "0.5152404", "0.51051813", "0.51051813", "0.51051813", "0.5080877", "0.5074385", "0.5073383", "0.5028572", "0.502197", "0.5019446", "0.49893695", "0.49889264", "0.49868116", "0.49794552", "0.4967941", "0.49645418", "0.49496847", "0.49452215", "0.4944212", "0.49398947", "0.49376547", "0.49376547", "0.49338987", "0.49282092", "0.49197906", "0.49167398", "0.49076596", "0.48988512", "0.4870724", "0.48660442", "0.48625997", "0.4860753", "0.48571026", "0.484733", "0.48462528", "0.484408", "0.48352355", "0.48343626", "0.4833201", "0.4831885", "0.48300248", "0.4824173", "0.48230892", "0.48226908", "0.48209736", "0.48209736", "0.48195374", "0.48186663", "0.48144323", "0.481442", "0.48101363", "0.48079985", "0.48079297", "0.48026145", "0.48011664", "0.47968557", "0.47898272", "0.47864085", "0.47864085", "0.47836983", "0.47815093", "0.47759995", "0.4775796", "0.47738582", "0.477296", "0.47713616" ]
0.0
-1
Return a dictionary of values for the key, by index
def find(self, key, lst=None): lst = self if lst is None else lst out = {} for row in lst: if key in row[DEP]: out[row[IND]] = row[DEP][key] elif key in row[INDEP]: out[row[IND]] = row[INDEP][key] return out
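As above, the IND/INDEP/DEP constants and the list-like container are not defined in this record, so the snippet below is only a self-contained sketch of the same lookup over plain-dict rows; the "index"/"indep"/"dep" field names, the sample rows, and find_key are illustrative assumptions.

# Sketch only: assumed row layout; returns {row index: value} for a key found
# in either the dependent or the independent dict, mirroring find() above.
rows = [
    {"index": 0, "indep": {"T": 300}, "dep": {"p": 1.0}},
    {"index": 1, "indep": {"T": 350}, "dep": {"p": 2.0}},
    {"index": 2, "indep": {"T": 400}, "dep": {}},
]

def find_key(rows, key):
    out = {}
    for row in rows:
        if key in row["dep"]:
            out[row["index"]] = row["dep"][key]
        elif key in row["indep"]:
            out[row["index"]] = row["indep"][key]
    return out

print(find_key(rows, "p"))  # {0: 1.0, 1: 2.0}
print(find_key(rows, "T"))  # {0: 300, 1: 350, 2: 400}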
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __indicesToValues(self, mappings, indices):\n values = collections.OrderedDict()\n\n i = 0\n for key, _ in mappings.items():\n values[key] = mappings[key](indices[i])\n\n i = i + 1\n\n return values", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.key_dict.values()", "def index_dict(data, idx, end=None):\n if end is not None:\n return {k:d[idx:end] if hasattr(d, '__getitem__') else d for k, d in data.items()}\n else:\n return {k:d[idx] if hasattr(d, '__getitem__') else d for k, d in data.items()}", "def __getitem__(self, key):\n items = {}\n for k in self._indexer(key):\n if k not in self._data:\n self._data[k] = self._dof_cls(self._pair_instance, self._name,\n k)\n items[k] = self._data[k]\n if len(items) == 1:\n return items.popitem()[1]\n return items", "def value_for_index(self, index):\r\n return self[self.keyOrder[index]]", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n # fetching a slice returns an OrderedDict\n return self._main[index].items()\n key = self._main._sequence[index]\n return (key, self._main[key])", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def get_index_from_dict(d):\n d_ind = {}\n for e in d.keys():\n idxs = np.array(d[e].index)\n d_ind[e] = idxs\n return d_ind", "def genome_index_to_dict(self, index):\n chrom_pos = self.chrom_and_pos(index)\n return {'Chromosome': chrom_pos[0], 'Position': chrom_pos[1]}", "def __getitem__(self, key):\n return self.__values.__getitem__(key)", "def __getitem__(self, key):\n return self.feature_df.loc[key].to_dict()", "def vert_ind_as_val(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = element\n\treturn vertex_map", "def obtain(self, key):\n if key in self:\n vals = self[key]\n else:\n vals = []\n dict.__setitem__(self, key, vals)\n return vals", "def get_dict(self):\n return {key: value for key, value in zip(self._words, self._vecs)}", "def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result", "def index_to_mapping(self) -> Dict[int, int]:\n if not self._reverse_atom_mappings:\n self._reverse_atom_mappings = {\n index: mapping for mapping, index in self.mapping_to_index.items()\n }\n return self._reverse_atom_mappings", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def create_index_dict(vcb_file):\n index_dict = {}\n vcb = open(vcb_file).readlines()\n for line in vcb:\n line = line.split()\n index_dict[int(line[0])] = line[1]\n return index_dict", "def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index", "def get_idx_scores_mapping(scores):\n return {i: score for i, score in enumerate(scores)}", "def __getitem__(self, idx):\n\n text, label = self.data[idx]\n ids = 
self.get_ids(text)\n\n return {\"ids\": ids, \"label\": label}", "def __getitem__(self, key):\n\n return self.values[key]", "def to_dict_index(df):\r\n return df.to_dict('index')", "def extract_values(replacements, indices):\n extracted_values = {}\n for place_holder in replacements.keys():\n # get all occurrences of the place holder key\n parts = place_holder.split(place_holder_key)\n # only one part -> no place holder key found -> no strings to be extracted\n if len(parts) == 1:\n continue\n\n keys = [part[:1] for part in parts[1:]]\n\n value_index = indices[place_holder]\n\n entries = replacements[place_holder]\n entry_key = sorted(entries.keys())[value_index]\n\n # check that the keys are unique\n for key in keys:\n if key in extracted_values:\n raise ValueError(\"The replacement key '%s' was defined multiple times. Please use each key only once.\"%key)\n\n # extract values\n if len(keys) == 1:\n extracted_values[keys[0]] = entries[entry_key]\n\n else:\n for i in range(len(keys)):\n extracted_values[keys[i]] = entries[entry_key][i]\n\n return extracted_values", "def values(self) -> Dict[str, Any]:\n all_values = {}\n for name in self.names():\n idx = self.hyperparams[name][1]\n hp_type = self.hyperparams[name][0]\n if hp_type == 'object':\n all_values[name] = self.hyperparams[name][2][idx]\n else:\n all_values[name] = idx\n\n return all_values", "def __getitem__(self, index: int):\n data, label = self.images[index], self.labels[index]\n data = self._apply_transform(data)\n\n return {\"data\": data, \"target\": label}", "def get_data_from_indices(self, data_dictionary, indices):\n data = {}\n for tag in self.data_tags:\n try:\n data[tag] = data_dictionary[tag][indices]\n except KeyError:\n print(\"no this key in the current data file!\")\n return data", "def get_categories_enumerated_key_map(self):\n return dict(enumerate([c.name for c in self.categories]))", "def get_indexes_for_key (self,key):\r\n\r\n if self.using_database:\r\n aprint('GETTING INDEXES FOR KEY')\r\n value_tuple = (notebookname,key,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=? 
and keyword=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n\r\n return self.key_dict[str(key)]", "def index_records(vr):\n return collections.OrderedDict((record2key(rec), clean_sample_index(rec))\n for rec in vr)", "def _key_vals(dict_):\n return [(key, val) for key, val in dict_.iteritems()]", "def _key_index_iter(self: Any) -> Iterator[Tuple[str, Any]]:\n for k, v in vars(self).items():\n yield k, v", "def get_key_values(self):\n return self.key_values", "def split_index(params):\n if isinstance(params, dict):\n if NodeType.INDEX in params.keys():\n return split_index(params[NodeType.VALUE])\n result = {}\n for key in params:\n result[key] = split_index(params[key])\n return result\n else:\n return params", "def __getitem__(self, key):\n return tuple(self._mapping[key])", "def index(self, key):\n try:\n return self._keys.index(key)\n except ValueError:\n raise KeyError(key)", "def get_index(self, key):\n return self.keys.index(key)", "def as_dict(self):\r\n return {self.words[i]: self.vectors[i] for i in range(self.n)}", "def reverse_word_index(word_index):\n return dict([(word_index[word], word) for word in word_index])", "def __getitem__(self, index):\n A_path = self.A_paths[index]\n A_target = self.A_targets[index] # make sure index is within then range\n A_img = Image.open(A_path).convert('RGB')\n A = self.transform(A_img)\n return {'A': A, 'A_paths': A_path, 'A_target': A_target}", "def adj_dict(self):\n adj_dict = {i: [] for i in self.indices}\n for coeff in self.interactions[1:]:\n for _inds, value in coeff.items():\n for i in _inds:\n _inds_list = list(_inds)\n _inds_list.remove(i)\n adj_dict[i].append([_inds_list, value])\n return adj_dict", "def __getitem__(self, index):\n try:\n if isinstance(index, int):\n # the only reliable way is to iterate up to the index:\n return next(islice(self, index, None))\n if isinstance(index, slice):\n return list(islice(self, index.start, index.stop, index.step))\n else:\n key_return = list(self._dictitem_gen(index))\n if self.KEY_ACCESS_REDUCE_SINGLETONS and len(key_return) == 1:\n return key_return[0]\n else:\n return key_return\n except StopIteration:\n raise IndexError(\"list index out of range\")", "def index(self, key):\r\n return self.keyOrder.index(key)", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def class_dict_from_index_dict(idx_dct):\n keys = sorted(idx_dct.keys())\n clas = sorted(keys, key=idx_dct.__getitem__)\n cla_dct = {i: tuple(c)\n for i, c in itertools.groupby(clas, key=idx_dct.__getitem__)}\n return cla_dct", "def tag_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.tag_dict.values()", "def __getitem__(self, key: K) -> List[V]:\n return self._table.get(key)", "def vec_to_dict(docVec):\n return {dimension:value for dimension, value in enumerate(docVec)}", "def lstToDict(key, value):\n return dict(zip(key, value))", "def df_to_dict(df, key_column, val_column):\n xkey = df[key_column].tolist()\n xval = df[val_column].tolist()\n return dict(zip(xkey,xval))", "def __getitem__(self, key):\n return self._mappings[key]", "def _dictview(self) -> TracksDict:\n return self._items[self._keys, 
self._beads] # type: ignore", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def __getitem__(self, idx):\n \n sample = {'num_atoms': self.num_atoms[idx],\\\n 'symbols': self.symbols[idx],\\\n 'charges': self.charges[idx],\\\n 'positions': self.positions[idx],\\\n 'data': self.data[int(np.floor(idx/2))]}\n\n return sample", "def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n sample = {}\n for i in range(len(self.key_words)):\n img_dir = os.path.join(self.base_dir, self.key_words[i])\n mask_dir = os.path.join(self.base_dir, self.key_words[i] + '_mask')\n key_word_data = get_rgbd_data(array_dir=img_dir,\n mask_dir=mask_dir,\n idx=idx)\n sample[self.key_words[i]] = key_word_data\n return sample", "def token_to_idx(self) -> Dict[Hashable, int]:\n return self._token_to_idx", "def mapToDict(dictionary, key):\n return dictionary[key]", "def __getitem__(self, key):\n return dict.__getitem__(self, key)", "def get_index(user_dict, product_dict):\n user_index = {}\n product_index = {}\n index = 0\n for user in user_dict.keys():\n user_index[user] = index\n index += 1\n index = 0\n for product in product_dict.keys():\n product_index[product] = index\n index += 1\n return [user_index, product_index]", "def get_dictionary_values(self,dictionary):\r\n return [dictionary[k] for k in self.get_dictionary_keys(dictionary)]", "def map_geo_hashed_value(l):\n \n l = sorted(l)\n return {k: index for index, k in enumerate(l)}", "def _get_key_set(self, d, key):\n dct = {}\n for k in list(d):\n dct[k] = d[k][0][key]\n return dct", "def group_by(y):\n\n y_dict = {}\n for i, value in enumerate(y):\n try:\n y_dict[value]\n #Value in dictionary\n y_dict[value].append(i)\n except:\n #Value not in dictionary\n y_dict[value] = [i]\n\n return y_dict", "def indexed_dataset(self) -> Dict[int, List]:\n if self.__indexed_dataset is None:\n dataset = self.dataset()\n truncated_dataset = dataset[:1000]\n self.__indexed_dataset = {\n i: dataset[i] for i in range(len(dataset))\n }\n return self.__indexed_dataset", "def partition(x):\r\n\r\n val = np.unique(x)\r\n d = {}\r\n for v in val:\r\n index = []\r\n for i in range(len(x)):\r\n if (x[i] == v):\r\n index.append(i)\r\n d[v] = index\r\n return d \r\n raise Exception('Function not yet implemented!')", "def indices(cls, hierarchical_dict: dict, indices: List[int]) -> dict:\n new_dict = {}\n all_keys = cls.get_all_keys(hierarchical_dict)\n for key in all_keys:\n value = cls.get(hierarchical_dict, key)\n if isinstance(value, numpy.ndarray) or isinstance(value, torch.Tensor):\n new_value = value[indices]\n elif isinstance(value, Sequence):\n new_value = [item for i, item in enumerate(value) if indices[i]]\n else:\n new_value = value\n cls.set(new_dict, key, new_value)\n return new_dict", "def create_dictionary_indexes(self):\n direction_dictionary = {}\n direction_dictionary[UP] = self.direction_list(UP)\n direction_dictionary[DOWN] = self.direction_list(DOWN)\n direction_dictionary[LEFT] = self.direction_list(LEFT)\n direction_dictionary[RIGHT] = self.direction_list(RIGHT)\n return direction_dictionary", "def __getitem__(self, index):\n return self.values[index]", "def __getitem__(self, key: K) -> Iterable[V]:\n raise NotImplementedError", "def index():\n\n return dict()", "def __getitem__(self, key):\n if isinstance(key, types.SliceType):\n # FIXME: does this raise the error we 
want?\n keys = self._sequence[key]\n # FIXME: efficiency?\n return OrderedDict([(entry, self[entry]) for entry in keys])\n else:\n return dict.__getitem__(self, key)", "def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]", "def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1", "def generate_dict(self):\n dict = defaultdict(list)\n for i in range(self.no_of_docs-1):\n doc_txt = self.doc_to_df(i)\n #assign key to index in dictionary and its locations as tuples(docid,line,wordpos) as the values\n for j in range(len(doc_txt)):\n for k in range(doc_txt.shape[1]):\n key = doc_txt[k][j]\n dict[key].append((i,j,k))", "def GetSubkeyByIndex(self, index):", "def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value", "def values_to_dict(keys: tuple, values: list) -> dict:\n out = {}\n for i, key in enumerate(keys):\n out[key] = np.array([x[i] for x in values])\n return out", "def df_to_dict(df):\n return df.to_dict('index')", "def dict_to_keys_and_values(dictionary: dict, key_name: str = \"Key\", value_name: str = \"Value\") -> list:\n result = [{key_name: key, value_name: dictionary[key]} for key in dictionary]\n return result", "def values(self):\n return [self[k] for k in self.keys()]", "def __getitem__(self, index: int):\n path, label = self.paths[index], self.labels[index]\n data = self._read_input_file(path)\n data = self._apply_transform(data)\n\n return {\"data\": data, \"target\": label}", "def list_values(key):\n return meta.list_values(key=key)", "def getBitArrayIndices(self, key):\n\t\treturnList = []\n\t\tfor i in range(1, self.k + 1):\n\t\t\treturnList.append((hash(key) + i * mmh3.hash(key)) % self.m)\n\t\t#print \"Indices list for key: \", key, \" is: \", str(returnList)\n\t\treturn returnList", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def __getitem__(self, key):\n return self._dict[key]", "def get_index(self, key):\r\n\t\tindex = self._hash_function(key) % self.capacity\r\n\t\treturn index", "def index(self):\n return dict(data='index')", "def __getitem__(self, key):\n return self._d[key]", "def degree_index_dict(self):\n did = dict()\n for i,c in enumerate(self.classes):\n if isinstance(c, lambda_class) or isinstance(c, psi_class) or c == 0:\n continue \n try:\n degree = c.degree\n except AttributeError:\n degree = 1\n if not did.has_key(degree):\n did[degree] = []\n did[degree].append(i+1)\n return did", "def vec2spec_dict(n_bins, vec, spectra):\n return {spec: vec[i * n_bins:(i + 1) * n_bins] for i, spec in enumerate(spectra)}", "def return_index(self, idx):\n return (\n self.timeseries[idx],\n self.ch_amount,\n self.freq[idx],\n self.ch_name[idx],\n self.units[idx],\n )", "def __getitem__(self, index):\n A_path = self.A_paths[index % self.A_size] # make sure index is within then range\n if self.opt.serial_batches: # make sure index is within then range\n index_B = index % self.B_size\n else: # randomize the index for domain B to avoid fixed pairs.\n index_B = random.randint(0, self.B_size - 1)\n B_path = self.B_paths[index_B]\n A_img = Image.open(A_path).convert('RGB')\n B_img = 
Image.open(B_path).convert('RGB')\n # apply image transformation\n A = self.transform_A(A_img)\n B = self.transform_B(B_img)\n\n return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}", "def __getslice__(self, start, stop):\n tuples = [(key, self.dict[key])for key in self.dict.iterkeys()]\n tuples = sorted(tuples, key=itemgetter(1), reverse=True)[start:stop]\n return [key for key, value in tuples]", "def __getitem__(self, index):\n # read a image given a integer index\n A_path = self.A_paths[index]\n A_img = Image.open(A_path).convert('RGB')\n transform = get_transform(self.opt, grayscale=(self.input_nc == 1))\n A = transform(A_img)\n return {'A': A, 'A_paths': A_path}", "def _get_index(self, key):\n return self._hash_function(key) % self.capacity", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n return [self._main[key] for key in self._main._sequence[index]]\n else:\n return self._main[self._main._sequence[index]]", "def __getitem__(self, key):\n return self.d[key]", "def get_dict(self):\n new_source_data = self.data.to_dict(orient=\"list\")\n new_source_data[\"index\"] = self.data.index\n for k in list(new_source_data):\n if isinstance(k, tuple):\n new_source_data[\"_\".join(k)] = new_source_data.pop(k)\n\n return new_source_data" ]
[ "0.63526154", "0.6277658", "0.61653423", "0.61640495", "0.6143637", "0.6119429", "0.61189026", "0.61189026", "0.6102903", "0.6055466", "0.59926194", "0.59890264", "0.5900034", "0.5844392", "0.5840268", "0.58324164", "0.5820689", "0.5770119", "0.5755432", "0.5730282", "0.5725843", "0.5717428", "0.5712312", "0.5705438", "0.56968224", "0.5678258", "0.5658363", "0.5637614", "0.56252915", "0.5620971", "0.5610194", "0.5608361", "0.55624586", "0.55602443", "0.5558344", "0.55430335", "0.5533721", "0.55336636", "0.5530438", "0.55275446", "0.55266577", "0.5525886", "0.55178905", "0.5509679", "0.55038077", "0.55014163", "0.54996836", "0.5489308", "0.5488919", "0.54831076", "0.54746884", "0.54625106", "0.5456757", "0.5436333", "0.54304844", "0.54240006", "0.53989404", "0.53963614", "0.53940034", "0.53728575", "0.53707236", "0.5369814", "0.5364865", "0.5361257", "0.5360543", "0.53578", "0.5355086", "0.5347374", "0.53469086", "0.5341658", "0.53384656", "0.533668", "0.5329938", "0.53259015", "0.5320475", "0.5318684", "0.53160775", "0.5313589", "0.5309489", "0.5305741", "0.5303996", "0.5302545", "0.52907145", "0.5284393", "0.5284076", "0.5284076", "0.5284076", "0.52816564", "0.5274943", "0.5268566", "0.52679", "0.5258892", "0.5252055", "0.5250728", "0.5246222", "0.5237714", "0.5237024", "0.5228476", "0.52277267", "0.5223829", "0.522076" ]
0.0
-1
Return unique key values
def unique(self, key, lst=None):
    d = self.find(key, lst)
    vals = set(d.values())
    return sorted(list(vals))
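A minimal, self-contained sketch of the same pattern as the `unique` method above; the surrounding class and its `find` helper are not shown in this row, so the example re-creates the logic over a plain list of dicts, and all names and sample data here are invented:

def unique_values(rows, key):
    # Gather every value stored under `key`, de-duplicate with a set, then sort.
    vals = {row[key] for row in rows if key in row}
    return sorted(vals)

rows = [{"color": "red"}, {"color": "blue"}, {"color": "red"}]
print(unique_values(rows, "color"))  # -> ['blue', 'red']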
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def distinct(self, key):\n return self.database.command({'distinct': self.name,\n 'key': key})['values']", "def _findUniqueMappingKeys(mapping):\n\n uniqueMappingKeys = set()\n for key, entries in viewitems(mapping):\n if len(entries) == 1:\n uniqueMappingKeys.add(key)\n return uniqueMappingKeys", "def unique_vals(client, proj, dataset, table, col_name):\n if not client.check_table(dataset, table):\n return []\n res = run_bq_query(client, \"SELECT %s FROM [%s:%s.%s] GROUP BY %s ORDER BY %s\" % (col_name, proj, dataset, table, col_name, col_name), 120)\n return [rec[col_name] for rec in res]", "def keys(self):\n return sorted(self._local_unique_map.keys())", "def _findUniqueMappingValues(mapping):\n uniqueMappingValues = set()\n for entries in viewvalues(mapping):\n if len(entries) == 1:\n uniqueMappingValues.update(entries)\n return uniqueMappingValues", "def unique(self, key: Callable[[T], Union[str, int, float]]=None) -> 'List[T]':\n return unique_values(self.array, key)", "def iunique(self, key: Callable[[T], Union[str, int, float]]=None) -> '_[T]':\n return _(unique_values(self.array, key))", "def uniq(val, key=None):\n if not isinstance(val, list):\n return val\n if key is None:\n try:\n return list(set(val))\n except TypeError:\n pass\n keys = []\n values = []\n for value in val:\n try:\n thiskey = value[key]\n except:\n thiskey = repr(value)\n if thiskey not in keys:\n keys.append(thiskey)\n values.append(value)\n return values", "def unique_rp(db):\n for rp in sorted(db['rp'].keys()):\n print(rp)", "def _unique(iterable):\n return list(dict.fromkeys(iterable))", "def unique_values(array: Iterable[T], key: Callable[[T], Union[str, int, float]]=None) -> List[T]:\n values = set()\n unique_array = []\n\n if key is None:\n for v in array:\n if v not in values:\n unique_array.append(v)\n values.add(v)\n else:\n for v in array:\n v_key = key(v)\n if v_key not in values:\n unique_array.append(v)\n values.add(v_key)\n\n return unique_array", "def _uniq( list ) : \r\n \r\n d = {} \r\n for e in list : \r\n d[e] = 1 \r\n \r\n return d.keys()", "def get_unique_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tcounts = {}\n\tmax_count = len(param_list)\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tcounts[k] = 1 + counts.get(k, 0)\n\tunique = []\n\t# now find out which keys are not shared\n\tfor k in counts:\n\t\tif counts[k] < max_count:\n\t\t\tunique.append(k)\n\tunique.sort()\n\treturn unique", "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "def keySet (self) -> StringSet:\n\n Logging.trace(\">>\")\n result = set(self._keyToValueMap.keys())\n Logging.trace(\"<<: %r\", result)\n return result", "def get_key_values(self):\n return self.key_values", "def unique_ssh_results(results):\n r = {}\n for k in results:\n r[results[k][0]] = True\n return r.keys()", "def get_values(self):\n return set(self._table.keys())", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def unique(self):\n # variables for uniques \n self._currentSet = 1\n self._uniqueValue = {}\n\n pd = self._dataTable\n for col in pd:\n arr = pd[col].unique()\n for i in arr:\n unique_entry = ((col,i),)\n self._uniqueValue[unique_entry] = 0 \n\n self._sets[self._currentSet] = self._uniqueValue", "def AllKeys(self) -> _n_0_t_1[str]:", "def get_unique_hstore_keys(\n session: 'Session',\n column: 'Column[dict[str, Any]]'\n) -> set[str]:\n\n base = 
session.query(column.keys()).with_entities( # type:ignore\n sqlalchemy.func.skeys(column).label('keys'))\n\n query = sqlalchemy.select(\n [sqlalchemy.func.array_agg(sqlalchemy.column('keys'))],\n distinct=True\n ).select_from(base.subquery())\n\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()", "def _uniq(self, lst):\n h = {}\n for e in lst:\n h[e] = 1\n return sorted(h.keys())", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)", "def get_unique(self):\n return self.serie.nunique()", "def get_unique_values(local_data, attr):\n\tvalues = []\n\tfor element in local_data:\n\t\tif element[attr] not in values:\n\t\t\tvalues.extend([element[attr]])\n\treturn values", "def find_uniq_preserve_order(orig_keys, orig_values=None):\n seen = {}\n keys = []\n values = []\n for i, item in enumerate(orig_keys):\n if item in seen:\n continue\n seen[item] = 1\n keys.append(item)\n if orig_values:\n values.append(orig_values[i])\n return keys, values", "def keys():", "def unique(list_: List) -> List:\n return list(collections.OrderedDict.fromkeys(list_))", "def getSet(unique_name):", "def getSet(unique_name):", "def unique(seq, key=identity):\n seen = set()\n for item in seq:\n tag = key(item)\n if tag not in seen:\n seen.add(tag)\n yield item", "def _get_unique_genres(connection):\n print('---Getting unique genres---')\n genreDict = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_genres;\")\n res = cursor.fetchall()\n num_genres = 0\n for genre in res:\n if genre[1] not in genreDict:\n genreDict[genre[1]] = num_genres\n num_genres += 1\n return genreDict", "def unique(x):\n\n return list(set(x))", "def get_all_keys(self):\r\n all_keys = []\r\n for i in range(len(self.hash_table)):\r\n if self.hash_table[i] is not None:\r\n all_keys.append(self.hash_table[i].key)\r\n return all_keys", "def hash_key(self):", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def unique_values(self):\n return DiscoDBInquiry(super(DiscoDB, self).unique_values)", "def keys(self):\n return [kvp.key for kvp in self.keyvaluepair_set.all()]", "def unique_hash(self):\n raise NotImplementedError(\"unique_hash Method not implemented\")", "def unique_vals(rows, col):\n return set([row[col] for row in rows])", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.key_dict.values()", "def keysAll():", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def unique_list(src_list):\n return list(OrderedDict.fromkeys(src_list).keys())", "def unique(iterable, key=None):\n\n\tif key is None:\n\t\tkey = identity\n\n\tseen = set()\n\tseen_add = seen.add\n\tfor v in iterable:\n\t\tk = key(v)\n\t\tif k not in seen:\n\t\t\tyield v\n\t\t\tseen_add(k)", "def keys(self) -> List:\n pass", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def keys(self):\n return list(self.token2id.values())", "def keys(self):\n return [key for key, value in self.items()]", "def uniqueResults( self, results ):\n rid_map = {}\n for r in results:\n rid_map[r.getRID()] = r\n return rid_map.values()", "def 
nunique(self, values: groupable) -> Tuple[groupable, pdarray]:\n # TO DO: defer to self.aggregate once logic is ported over to Chapel\n # return self.aggregate(values, \"nunique\")\n togroup = self._nested_grouping_helper(values)\n # Find unique pairs of (key, val)\n g = GroupBy(togroup)\n # Group unique pairs again by original key\n g2 = GroupBy(g.unique_keys[0], assume_sorted=False)\n # Count number of unique values per key\n keyorder, nuniq = g2.count()\n # The last GroupBy *should* result in sorted key indices, but in case it\n # doesn't, we need to permute the answer to match the original key order\n if not is_sorted(keyorder):\n perm = argsort(keyorder)\n nuniq = nuniq[perm]\n # Re-join unique counts with original keys (sorting guarantees same order)\n return self.unique_keys, nuniq", "def keys(self):\n\t\treturn tuple(self.dist.keys())", "def key(self):\n if self._key is None:\n fields = []\n for attr in self.__fields__:\n val = getattr(self, attr)\n if isinstance(val, list):\n val = tuple(val)\n fields.append(val)\n self._key = hash(tuple(fields))\n return self._key", "def key(self, x):\r\n return tuple(x)", "def keys(self):\r\n return [k for k in self]", "def uniq(iterable, key=lambda x: x):\n keys = set()\n res = []\n for x in iterable:\n k = key(x)\n if k in keys:\n continue\n\n res.append(x)\n keys.add(k)\n return res\n\n # Enumerate the list to restore order lately; reduce the sorted list; restore order\n # def append_unique(acc, item):\n # return acc if key(acc[-1][1]) == key(item[1]) else acc.append(item) or acc\n # srt_enum = sorted(enumerate(iterable), key=lambda item: key(item[1]))\n # return [item[1] for item in sorted(reduce(append_unique, srt_enum, [srt_enum[0]]))]", "def get_norm_key_values(self, board):\n return self.key_values", "def uniques_only_trey_oneline(iterable):\n return dict.fromkeys(iterable).keys()", "def unique_key(self):\n return json.dumps([self.name, self.birthDate])", "def unique(self):\n return frozenset(self)", "def unique_justseen(iterable, key = None):\n return imap(next, imap(operator.itemgetter(1), groupby(iterable, key)))", "def input_to_hash(self, keys):\n basic_keys = []\n for i, key in enumerate(keys):\n s = ''\n #print(max(key), min(key))\n for val in key:\n s += \"{:04x}\".format(val)\n basic_keys.append(s)\n return basic_keys", "def Keys(self) -> _n_1_t_4:", "def _unique(li):\n return list(set(li))", "async def keys(self) -> Iterable[str]:", "def _is_unique_key(self, key):\n return self._in_keys(key, self._unique_keys)", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def edges_unique_key(graph):\n edges = list(map(sorted, graph.edges))\n return tuple(map(tuple, sorted(edges, key=itemgetter(0,1))))", "def keys(self):\n return [ x for x in self ]", "def keys(self):\n ks = dict.keys(self)\n ks.sort()\n return ks", "def keys(self):\n # TODO: Collect all keys in each of the buckets\n all_keys = [] # Will store all the key\n for bucket in self.buckets:\n for key in bucket:\n if key is not None:\n all_keys.append(key[0])\n return all_keys", "def uniqField(self, fieldname):\n\t\tretval = list(set(self.getField(fieldname)))\n\t\tretval.sort()\n\t\treturn retval", "def distinct(x):\n return list(set(x))", "def get_unique_hashes():\n return list( set( [ filename.split(\"_\")[0] for filename in os.listdir(CACHE_DIRECTORY) ] ) )", "def test_get_unique_drug_list(self):\n dict1 = self.test_dict\n dict2 = get_unique_drug_list(self.test_sorted_tuple)\n self.assertEqual(dict1, dict2)", "def 
get_keys(self):\r\n return self._keys", "def _get_keys(self, listOfKeys):\n return self._keys", "def gen_keys():", "def unique_column_values(rows, column_name):\r\n\r\n values = [] #Create an empty list\r\n for row in rows: #Iterate through each row\r\n values.append(row[column_name]) \r\n values = set(values)\r\n return values", "def enforce_unique_values(self):\n return self.properties.get('enforceUniqueValues', None)", "def unique_drugs(self):\n if self.results is not None:\n return tuple(self.results['drug'].unique())", "def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys", "def _key(self):\n return (self.name, self.array_type.upper(), self.values)", "def keys(self) -> tuple[Hashable, ...]:\n return tuple([self._hashify(item = c) for c in self.contents])", "def getall(self, key):\n return self.values.get(key, [])", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def unique_instance(un_data):\n test_dict = dict()\n indexed = list()\n count = 0\n for i,item in enumerate(un_data):\n if not test_dict.has_key( hash(item) ):\n test_dict[ hash(item) ] = 0\n else:\n count = count + 1\n indexed.append(i)\n return count, indexed", "def unique_rows(self):\n return list(set([coord[0] for coord in self.landscape]))", "def tri(self, dico):\n return sorted(dico.keys(), key=str)", "def unique(seq, key=None):\n\tif key is None:\n\t\tkey = lambda x: x\n\tpreserved_type = type(seq)\n\tif preserved_type not in (list, tuple):\n\t\traise TypeError(\"unique argument 1 must be list or tuple, not {0}\".format(preserved_type.__name__))\n\tseen = []\n\tresult = []\n\tfor item in seq:\n\t\tmarker = key(item)\n\t\tif marker in seen:\n\t\t\tcontinue\n\t\tseen.append(marker)\n\t\tresult.append(item)\n\treturn preserved_type(result)", "def nonunique_gens(df,\n key_cols=['plant_id_eia', 'generator_id', 'report_date']):\n unique_gens = df.drop_duplicates(subset=key_cols)\n dupes = df[~df.isin(unique_gens)].dropna()\n dupes = dupes.sort_values(by=key_cols)\n return dupes", "def get_unique(self):\n unique_values = len(self.df[self.col_name].unique())\n return unique_values", "def uniq(seq: Iterable):\n seen, result = {}, []\n for item in seq:\n if item in seen:\n continue\n seen[item] = None\n result.append(item)\n return result", "def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)", "def unique(self, values: groupable): # type: ignore\n from arkouda import Categorical\n from arkouda.segarray import SegArray\n\n if isinstance(values, (Strings, Categorical)) or (\n isinstance(values, Sequence) and any([isinstance(v, (Strings, Categorical)) for v in values])\n ):\n raise TypeError(\"Groupby.unique not supported on Strings or Categorical\")\n\n togroup = self._nested_grouping_helper(values)\n # Group to unique (key, value) pairs\n g = GroupBy(togroup)\n ki, unique_values = g.unique_keys[0], g.unique_keys[1:]\n # Group pairs by key\n g2 = GroupBy(ki)\n # GroupBy should be stable with a single key array, but\n # if for some reason these unique keys are not in original\n # order, then permute them accordingly\n if not (g2.unique_keys == arange(self.ngroups)).all():\n perm = argsort(cast(pdarray, g2.unique_keys))\n reorder = True\n else:\n reorder = False\n # Form a SegArray for each value array\n # Segments are from grouping by key indices\n # Values are the unique elements of the values arg\n if len(unique_values) == 1:\n # Squeeze singleton 
results\n ret = SegArray(g2.segments, unique_values[0])\n if reorder:\n ret = ret[perm]\n else:\n ret = [SegArray(g2.segments, uv) for uv in unique_values] # type: ignore\n if reorder:\n ret = [r[perm] for r in ret] # type: ignore\n return self.unique_keys, ret # type: ignore", "def unique(self):\n num_rows = len(self._rows)\n if num_rows == 0:\n raise NoResults()\n elif num_rows > 1:\n raise MultipleResults(num_rows)\n cols = [col[0] for col in self.description]\n return OrderedDict(zip(cols, self._rows[0]))", "def nunique(self, values):\n return self.aggregate(values, \"nunique\")", "def values(self):\n # TODO: Collect all values in each of the buckets\n all_values = [] # Will store all the key\n\n for bucket in self.buckets:\n for value in bucket:\n if value is not None:\n all_values.append(value[1])\n return all_values" ]
[ "0.72342044", "0.7174483", "0.68495494", "0.67468446", "0.66601634", "0.6650714", "0.6622684", "0.6563308", "0.64356095", "0.64118284", "0.6411825", "0.6408245", "0.6370248", "0.6339864", "0.6293389", "0.6292799", "0.62809783", "0.6236327", "0.62303406", "0.6222865", "0.61984926", "0.6180261", "0.61780614", "0.6175361", "0.6149021", "0.6135467", "0.6114433", "0.6101749", "0.60853934", "0.60821867", "0.60821676", "0.60821676", "0.6072213", "0.6069357", "0.6061279", "0.6061184", "0.60572916", "0.6024121", "0.6019015", "0.6015315", "0.60077685", "0.6000313", "0.59968764", "0.596435", "0.5958339", "0.59383523", "0.5921118", "0.5912739", "0.58862734", "0.5876848", "0.58763534", "0.58665043", "0.5859382", "0.58511055", "0.584563", "0.5821039", "0.58169407", "0.5804728", "0.58003813", "0.57938135", "0.57926804", "0.57909", "0.57901776", "0.5784308", "0.5777681", "0.5776765", "0.5775969", "0.5775805", "0.5772939", "0.57715297", "0.576235", "0.5758513", "0.57575977", "0.575742", "0.57568896", "0.5754342", "0.5749548", "0.57482153", "0.57468075", "0.5744802", "0.57433534", "0.5730145", "0.57287407", "0.5721474", "0.5719423", "0.5716136", "0.5715314", "0.57122076", "0.57113886", "0.57107437", "0.5707623", "0.570669", "0.57057005", "0.5686758", "0.5678173", "0.56632686", "0.56518924", "0.5645534", "0.563781", "0.56358486" ]
0.7189072
1
Return unique combinations of keys in the box
def combinations(self, key_list, lst=None):
    lst = self.filtered(key_list, lst)
    tups = [tuple([d[INDEP].get(k, d[DEP].get(k)) for k in key_list]) for d in lst]
    s = set(tups)
    l = list(s)
    l.sort()
    return [{k: v for k, v in zip(key_list, vals)} for vals in l]
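A runnable sketch of the `combinations` logic above; `INDEP` and `DEP` are module-level constants in the original project, so placeholder string values are assumed here and the sample rows are invented:

INDEP, DEP = "indep", "dep"  # assumed placeholder values for the original constants

def combinations(rows, key_list):
    # One tuple per row: each key is looked up in the INDEP dict first, then DEP.
    tups = {tuple(row[INDEP].get(k, row[DEP].get(k)) for k in key_list) for row in rows}
    # De-duplicate, sort, and hand back dicts keyed by key_list.
    return [dict(zip(key_list, vals)) for vals in sorted(tups)]

rows = [
    {INDEP: {"a": 1}, DEP: {"b": 2}},
    {INDEP: {"a": 1}, DEP: {"b": 3}},
    {INDEP: {"a": 1}, DEP: {"b": 2}},
]
print(combinations(rows, ["a", "b"]))  # -> [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]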
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keysAll():", "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def get_all_combinations(self, hash_set):\n\t\tnames = sorted(hash_set)\n\t\treturn [dict(zip(names, prod)) for prod in it.product(\n\t\t*(hash_set[name] for name in names))]", "def iter_uniq(iterables):\n\n return (key for key in OrderedDict.fromkeys(chain(*iterables)))", "def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])", "def generate_input(s_terms):\n qm = QuineMcCluskey()\n res = set()\n if len(s_terms) == 0:\n return res\n for term in s_terms:\n res = res | set([i for i in qm.permutations(term)])\n return res", "def build_possible_naked_sets(c, setlength=2):\n ns = {}\n pairs = [p for p in c.values() if len(p) == setlength]\n for k, v in c.items():\n if v in pairs:\n ns[k] = sorted(v)\n return ns", "def keys():", "def combine_keys(*keys: bytes) -> bytes:\n key = hashlib.sha3_512(keys[0]).digest()\n for k in keys[1:]:\n next_key = hashlib.sha3_512(k).digest()\n\n key = bytes([\n a ^ b\n for (a, b)\n in zip(key, next_key)\n ])\n return key", "def unique(self):\n seen = {}\n result = []\n for p in map(tuple, self):\n if p not in seen:\n seen[p] = True\n result.append(p)\n return Pairs(result)", "def _findUniqueMappingKeys(mapping):\n\n uniqueMappingKeys = set()\n for key, entries in viewitems(mapping):\n if len(entries) == 1:\n uniqueMappingKeys.add(key)\n return uniqueMappingKeys", "def _expand_keys(entities):\n keys = list(entities.keys())\n values = list(product(*[entities[k] for k in keys]))\n return [{k: v for k, v in zip(keys, combs)} for combs in values]", "def _compute_type_keys(self, types):\n if self._len_keys == 1:\n return set(types)\n else:\n return {\n tuple(sorted(key))\n for key in combinations_with_replacement(types, self._len_keys)\n }", "def permutations(self, key):\n yield key", "def valid_parameter_combinations(parameterSpace):\n all_combinations = product(*parameterSpace.values())\n return [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]", "def kchainbasis(h, k):\n\n import itertools as it\n kchains = set()\n for e in h.edges():\n if len(e) == k + 1:\n kchains.add(tuple(sorted(e.uidset)))\n elif len(e) > k + 1:\n kchains.update(set(it.combinations(sorted(e.uidset), k + 1)))\n return sorted(list(kchains))", "def __combination(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __combination(orgset[i + 1 :], k - 1):\n yield (x,) + s", "def Keys(self) -> _n_1_t_4:", "def gen_keys():", "def unique_permutations(items):\n return set(permutations(items))", "def tri(self, dico):\n return sorted(dico.keys(), key=str)", "def perm_2_let():\r\n return {''.join(i) for i in permutations('abcdefghijklmnopqrstuvwxyz', 2)}\r\n # print(comb_2_let, sep='')\r", "def generate_combinations(rack,placed_tile):\n combinations_set = set()\n if placed_tile == \"\":\n for i in range(3, (len(rack)+1)):\n for x in itertools.combinations(rack, i):\n combinations_set.add(x)\n else:\n word = rack+placed_tile\n for i in 
range(3, (len(word)+1)):\n for x in itertools.combinations(word, i):\n if placed_tile in x:\n combinations_set.add(x)\n \n return combinations_set", "def find_unique_keys(base_config, comp_config, base_name):\n unique_keys = []\n unique_sections = []\n\n for section in base_config:\n if str(section) == 'DEFAULT':\n continue #.cfg has DEFAULT key, we do not use\n if not comp_config.has_section(section):\n unique_label = base_name + '.' + str(section)\n unique_sections.append(unique_label)\n continue\n\n for key in base_config[section]:\n if not comp_config.has_option(section, key):\n unique_label = str(section) + '.' + str(key)\n unique_keys.append(unique_label)\n continue\n #TODO: compare values?\n return unique_sections, unique_keys", "def exercise_b2_2():\r\n letters = ['a', 'e', 'i', 'o', 'u', 'u']\r\n combinations = list(permutations(letters))\r\n uniq_combinations = set(combinations)\r\n total_possibilities = len(combinations)\r\n total_uniq_possibilities = len(uniq_combinations)\r\n print(\"\\nThere are %s possible combinations and %s unique combinations for this set\\n\" \r\n % (total_possibilities, total_uniq_possibilities))\r\n return", "def distinct_blocks(cipher, size):\n\tdist_chucnks = {}\n\tfor chunk in chunks(cipher, size):\n\t\tdist_chucnks[chunk] = 1\n\treturn dist_chucnks", "def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)", "def input_to_hash(self, keys):\n basic_keys = []\n for i, key in enumerate(keys):\n s = ''\n #print(max(key), min(key))\n for val in key:\n s += \"{:04x}\".format(val)\n basic_keys.append(s)\n return basic_keys", "def chash(cycles):\n return {\"\".join(sorted(set(x))) for x in cycles}", "def AllKeys(self) -> _n_0_t_1[str]:", "def apriori_gen(Ls):\n Lks = Ls[len(Ls) -1] #L(k-1)\n LLength = len(Ls)\n Lc = combinations(Lks, r = LLength+1)\n fs = frozenset([i for i in Lc])\n\n Ck =[] #L(k)\n for s in fs:\n ckItem = frozenset()\n for ss in s:\n ckItem = ckItem.union(ss)\n if not has_infrequent_subset(ckItem, Lks):\n Ck.append(ckItem)\n\n# print \"Ck:\",Ck\n return Ck", "def get_pairs(self):\n self.get_locations()\n self.choices = {}\n for host, pathogens in self.locations.iteritems():\n if len(pathogens) > 1:\n for pair in combinations(pathogens, 2):\n self.choices.update({pair: host}) # pairs of pathogens in same host", "def gen_k_ary_ind_from_cliques(k: int, E: Iterable[Edge]) -> FrozenSet[Edge]:\n result = set()\n for i in E:\n result.update(map(Edge, itertools.permutations(i, k)))\n return frozenset(result)", "def get_unique_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tcounts = {}\n\tmax_count = len(param_list)\n\tfor p in param_list:\n\t\tfor k in p:\n\t\t\tcounts[k] = 1 + counts.get(k, 0)\n\tunique = []\n\t# now find out which keys are not shared\n\tfor k in counts:\n\t\tif counts[k] < max_count:\n\t\t\tunique.append(k)\n\tunique.sort()\n\treturn unique", "def get_next_unconf_keys(self):\n UC_List = []\n for key in self.Poss_Tree:\n key_c = int(str(key)[-1])\n for choice in self.Poss_Tree[key]:\n if choice != key_c:\n UC_List.append(int(construct_pass(key, choice)))\n return UC_List", "def get_group_keys(self):\r\n if len(self.conflicting_exclusives) == 0:\r\n return [\"<none>\"]\r\n else:\r\n return self.key_to_targets.keys()", "def get_merge_key_list(self, phrase):\n key_list = []\n if self.isExcludedFromMerge(phrase) == False:\n #print \"KEY (1) %s\" % (phrase)\n key_list = [phrase]\n\n ngram = self.get_normalised_phrase(phrase)\n if self.isExcluded(ngram) == 
False and ngram not in key_list:\n #print \"KEY (2) %s\" % (ngram)\n key_list.append(ngram)\n word_list = ngram.split()\n if len(word_list) > 2:\n key_list.append(' '.join(word_list[0:3]))\n if len(word_list) > 1:\n key_list.append(' '.join(word_list[0:2]))\n\n for word in [x for x in word_list if self.isExcludedFromMerge(x.strip()) == False]:\n if word not in key_list:\n #print \"KEY (3) %s\" % (word)\n #print word\n key_list.append(word)\n\n return key_list", "def key_arr(x):\n b = [x & bit_mask(32)]\n x >>= 32\n while x > 0:\n b.insert(0, x & bit_mask(32))\n x >>= 32\n return tuple(b)", "def permutations(config):\r\n return list(set(itertools.permutations(config)))", "def keySet (map):\n ltset = lt.newList()\n for pos in range(lt.size(map['table'])):\n entry = lt.getElement (map['table'], pos+1)\n if (entry['key']!=None and entry['key']!='__EMPTY__'):\n lt.addLast (ltset, entry['key'])\n return ltset", "def get_combinations(self):\n all_steps = self.do_steps()\n self.option = [k for k, v in all_steps.items()]\n result = itertools.product(*(v for k, v in all_steps.items()))\n return result", "def naked_twins(values):\n # TODO: Implement this function!\n \n # First select boxes with 2 entries\n potential_twins = [box for box in values.keys() if len(values[box]) == 2]\n # Collect boxes that have the same elements\n naked_twins = [[box1,box2] for box1 in potential_twins for box2 in peers[box1] if set(values[box1])==set(values[box2]) ]\n #print(naked_twins)\n \n for twins in naked_twins:\n box1 = twins[0]\n box2 = twins[1]\n # 1- compute intersection of peers\n peers1 = set(peers[box1])\n peers2 = set(peers[box2])\n peers_int = peers1 & peers2\n # 2- Delete the two digits in naked twins from all common peers.\n for box in peers_int:\n if len(values[box])>=2:\n for rm_val in list(set(values[box1]+values[box2])):\n #print (box, \"=>\", values[box], \"removed\", rm_val)\n values = assign_value(values, box, values[box].replace(rm_val,''))\n\n return values", "def gen_all_holds(hand):\n from_hand = [()]\n for item in hand:\n for subset in from_hand:\n from_hand = from_hand + [tuple(subset) + (item, )]\n \n return set(from_hand)", "def build_coords_per_naked_set(ns):\n cpns = defaultdict(list)\n for pair in ns.values():\n if cpns.get(tuple(pair), 0): continue\n for k, v in ns.items():\n row, col = k\n if v == pair:\n cpns[tuple(pair)].append(k)\n return cpns", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def get_all_combinations(param_opt):\n\tif not param_opt:\n\t\treturn {}\n\treturn (dict(zip(param_opt.keys(), x)) for x in itertools.product(*param_opt.values()))", "def uniques_only_trey_oneline(iterable):\n return dict.fromkeys(iterable).keys()", "def chain_unique(*iterables):\n seen = set()\n for element in iterables:\n for item in element:\n k = item.id\n if k not in seen:\n seen.add(k)\n yield item", "def generate_keys(g, o):\n priv = o.random()\n pub = priv * g\n\n return (priv, pub)", "def 
delete_duplicate(x):\n return list(dict.fromkeys(x))", "def get_shared_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tkeys = set(param_list[0].keys())\n\tfor i in range(1, len(param_list)):\n\t\tkeys = keys.intersection(param_list[i].keys())\n\tkeys = list(keys)\n\tkeys.sort()\n\treturn keys", "def generate_option_combos(self):\n available_options = list()\n for option in self.options:\n # generate a list of dicts for every value of the option\n tmp = list()\n for value in option.values:\n tmp.append({option.name: value})\n\n available_options.append(tmp)\n\n # generate a list of tuples for each product option combination\n option_combos = list(itertools.product(*available_options))\n\n return option_combos", "def generate_words(combo,scrabble_words_dict):\n word_set = set()\n for w in itertools.permutations(combo):\n word = ''.join(w)\n if word in scrabble_words_dict:\n word_set.add(word)\n return word_set", "def keys(self) -> tuple[Hashable, ...]:\n return tuple([self._hashify(item = c) for c in self.contents])", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def generate_valid_keys():\n valid_keys = []\n for minimum, maximum in RANGES:\n for i in range(ord(minimum), ord(maximum) + 1):\n valid_keys.append(chr(i))\n return valid_keys", "def keys(self):\n if self._cubas is None:\n data = self.data\n self._cubas = set(\n CUBA[data.GetArray(index).GetName()]\n for index in range(data.GetNumberOfArrays()))\n return set(self._cubas)", "def generate_keys(cls, des_key: str) -> List[list]:\n\n keys = []\n des_key = cls.string_to_bit_array(des_key)\n # Apply the initial Permutation on the key\n des_key = cls.permutation_expand(des_key, Tables.PC_1_TABLE)\n # Split it in to LEFT,RIGHT\n left, right = cls.n_split(des_key, 28)\n # Apply the 16 rounds\n for i in range(16):\n # Apply the shift associated with the round (not always 1)\n left, right = cls.shift(left, right, Tables.SHIFT_ARRAY[i])\n # Merge them\n tmp = left + right\n # Apply the Permutation to get the Ki\n keys.append(cls.permutation_expand(tmp, Tables.PC_2_TABLE))\n return keys", "def get_all_combinations(self):\n stuffs = map(lambda row: row.split(\" \"), self.expanded['GS'] )\n\n combs = self.all_combinations(stuffs)\n\n cls_repeated = self.expanded['CLS'].reset_index(drop=True)[np.array(combs[0])]\n\n A = cls_repeated.reset_index(drop=True)\n B = pd.Series(combs[1])\n\n combo_table = pd.DataFrame([A, B]).T\n\n combo_table.columns = ['CLS','GSCMB']\n\n df = combo_table\n\n df['srt'] = [ ' '.join(map(str, g)) for g in df[\"GSCMB\"] ]\n keep_idx = df[[0,2]].drop_duplicates().index\n gewd = df.iloc[keep_idx,:].reset_index(drop=True)[[\"CLS\",\"GSCMB\"]]\n\n combo_table = gewd\n\n combo_dict = combo_table.groupby('CLS')['GSCMB'].apply(lambda x: x.tolist())\n return combo_dict", "def get_3away_pairs(kmers):\n k = len(kmers[0])\n if k == 1 or k==2:\n return []\n if k == 3:\n return [pair for pair in combinations(kmers, 2) if pair[0][0] != pair[1][0] and pair[0][1] != pair[1][1] and pair[0][2] != pair[1][2]]\n k_L = k//2\n k_R = k-k_L\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n pairs = []\n kmers_L = []\n kmers_R = []\n for i, kmer in enumerate(kmers):\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n #print(kmer_L)\n #print(kmer_R)\n kmers_L.append(kmer_L)\n kmers_R.append(kmer_R)\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n for kmer_L_hash in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash) > 1:\n kmer_L = 
kmers[kmer_L_hash[0]][:k_L] #first half\n pairs += [tuple(kmer_L + kmer for kmer in pair) for pair in get_3away_pairs([kmers[i][k_L:] for i in kmer_L_hash])] #differ by 3 in second half\n for kmer_R_hash in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash) > 1:\n kmer_R = kmers[kmer_R_hash[0]][k_L:] #second half\n #print(kmer_R)\n pairs += [tuple(kmer + kmer_R for kmer in pair) for pair in get_3away_pairs([kmers[i][:k_L] for i in kmer_R_hash])] #differ by 3 in first half\n possible_pairs = []\n possible_pairs_L = get_1away_pairs(kmers_L)\n possible_pairs_R = get_2away_pairs(kmers_R)\n #print(kmers_L)\n #print(kmers_R)\n #print(possible_pairs_L)\n #print(possible_pairs_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n possible_pairs = []\n possible_pairs_L = get_2away_pairs(kmers_L)\n possible_pairs_R = get_1away_pairs(kmers_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n return(pairs)", "def gen_all_holds(hand):\n without_repeat = []\n mask_seque = list(gen_all_sequences([0,1], len(hand)))\n for dum_i in mask_seque:\n without_repeat.append(())\n \n for dum_i in range(len(mask_seque)):\n for dum_j in range(len(mask_seque[dum_i])):\n if (mask_seque[dum_i][dum_j]==1):\n without_repeat[dum_i]=list(without_repeat[dum_i])\n without_repeat[dum_i].append(hand[dum_j])\n without_repeat[dum_i]=tuple(without_repeat[dum_i])\n \n without_repeat = set(tuple(without_repeat))\n return without_repeat", "def get_next_keys(self):\n P_List = []\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n P_List.append(int(construct_pass(key, choice)))\n return P_List", "def n_choose_kv(newK):\n values = np.zeros((1,newK+1))\n ks = np.arange(newK+1)\n \n for i in range(newK+1):\n values[i] = scipy.misc.comb(newK, ks[i])\n\n return values", "def get_unique_families(self,hkls):\n\n # TODO: Definitely can be sped up.\n def is_perm(hkl1, hkl2):\n h1 = np.abs(hkl1)\n h2 = np.abs(hkl2)\n return all([i == j for i, j in zip(sorted(h1), sorted(h2))])\n\n unique = collections.defaultdict(list)\n for hkl1 in hkls:\n found = False\n for hkl2 in unique.keys():\n if is_perm(hkl1, hkl2):\n found = True\n unique[hkl2].append(hkl1)\n break\n if not found:\n unique[hkl1].append(hkl1)\n\n pretty_unique = {}\n for k, v in unique.items():\n pretty_unique[sorted(v)[-1]] = len(v)\n\n return pretty_unique", "def keys(self):\n # TODO: Collect all keys in each of the buckets\n all_keys = [] # Will store all the key\n for bucket in self.buckets:\n for key in bucket:\n if key is not None:\n all_keys.append(key[0])\n return all_keys", "def gen_all_holds(hand):\r\n possible_holds = set([()])\r\n \r\n for dice in hand:\r\n temp_holds = possible_holds.copy()\r\n for hold in temp_holds:\r\n temp_seq = list(hold)\r\n temp_seq.append(dice)\r\n possible_holds.add(tuple(temp_seq))\r\n \r\n return possible_holds", "def keys(key, num_rounds):\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, 
num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)", "def keys(self):\n\t\treturn tuple(self.dist.keys())", "def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))", "def edges_unique_key(graph):\n edges = list(map(sorted, graph.edges))\n return tuple(map(tuple, sorted(edges, key=itemgetter(0,1))))", "def unique_ssh_results(results):\n r = {}\n for k in results:\n r[results[k][0]] = True\n return r.keys()", "def _add_unique_combinations(self) -> None:\n for single_target, (combined_transform, paths) in self._scratch.combinations.items():\n path_key = self._unique_combination_index(single_target, paths, combined_transform)\n self._transform_cycles[path_key] = 0\n self._scratch.primes[single_target] = path_key\n self._transform_to_index[combined_transform] = path_key\n self._index_to_transform[path_key] = combined_transform", "def find_uniq_preserve_order(orig_keys, orig_values=None):\n seen = {}\n keys = []\n values = []\n for i, item in enumerate(orig_keys):\n if item in seen:\n continue\n seen[item] = 1\n keys.append(item)\n if orig_values:\n values.append(orig_values[i])\n return keys, values", "def gen_decomp_keys(self, decomp_list):\n for key in decomp_list:\n if isinstance(key, tuple) or isinstance(key, list):\n yield key[0]\n else:\n yield key", "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def allpermutations(orgset, k):\n return itertools.chain(*[permutation(orgset, i) for i in range(1, k + 1)])", "def combos(array,n=2): \n # base case\n if n==0:\n yield frozenset()\n return\n\n # core recursion\n for c in set(combos(array,n-1)):\n for i in array:\n #added this to avoid duplicate combos\n if i not in c:\n # add element i to combo c\n yield frozenset({i})| c", "def eliminate(values):\n complete_boxes = [box for box in values.keys() if len(values[box])==1]\n for box in complete_boxes:\n for peer in peers[box]:\n values = assign_value(values, peer, values[peer].replace(values[box], \"\"))\n \n return values", "def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)", "def subsets(x, k):\n sub_set = set()\n for i in x:\n sub_set = sub_set.union(set(combinations(i, k)))\n return list(sub_set)", "def keys(self):\n return sorted(self._local_unique_map.keys())", "def unique(self, key, lst=None):\n d = self.find(key, lst)\n vals = set(d.values())\n return sorted(list(vals))", "def components(self) -> Iterable[Mapping[T, Set[T]]]:", "def get_utxo_in_keys(self):\n\n in_utxo = []\n # Package the inputs\n for intx in self.inTx:\n inkey = pack(\"32sI\", intx.tx_id, intx.pos)\n in_utxo += [inkey]\n return in_utxo", "def addKey(s1, s2): \r\n return [i ^ j for i, j in zip(s1, s2)]", "def print_duplicates(non_uniques, ids, keys):\n for e in non_uniques:\n equiv_str = \"\"\n for id, key in zip(ids, keys):\n if id == e:\n equiv_str += key + \" <-> \"\n print(e + \" | \" + equiv_str[:-5])", "def duplicate_data(rng):\n N = 10\n keys = [str(1) for _ in range(1,N)]\n return zip(keys,\n [1 for _ in range(1,N)],\n [rng.random(128) for _ in range(N)],\n [random.choice([range(1,N),None]) for _ in range(N)])", "def unique_rp(db):\n for rp in sorted(db['rp'].keys()):\n print(rp)", "def get_pairs(terms):\n return itertools.combinations(terms, 2)", "def 
unique_blocks_only(gen):\n seen = set()\n count = 0\n for item in gen:\n key = tuple([int(x) for x in item])\n if key not in seen:\n seen.add(key)\n yield item\n count += 1\n # log.info(\"%s/%s were unique blocks\",len(seen),count)", "def get_key_set():\n keys = [0] * n_families\n for i in range(n_families):\n keys[i] = get_key(i)\n\n return keys", "def get_combo_list(self, loopdict, pegged=0):\n combolist=list()\n flatlists=list()\n loopkeys = list(loopdict.keys())\n loopkeys.sort()\n if pegged == 0:\n for loopkey in loopkeys:\n numloop = len(loopdict[loopkey]['looplist'])\n loopct=0\n flatlist=list()\n while loopct < numloop:\n flatlist.append(str(loopkey) + '-' + str(loopct))\n loopct = loopct + 1\n flatlists.append(flatlist)\n import itertools\n prod_list = itertools.product(*flatlists)\n stopiter = 0\n while not stopiter:\n try:\n mycomb = prod_list.next()\n except StopIteration:\n stopiter = 1\n if stopiter == 0:\n combolist.append(list(mycomb))\n elif pegged == 1:\n if len(loopkeys) == 0:\n return combolist #Empty list\n numloop = len(loopdict[loopkeys[0]]['looplist']) #all same len\n numct=0\n while numct < numloop:\n flatlist=list()\n for loopkey in loopkeys:\n flatlist.append(str(loopkey) + '-' + str(numct))\n numct = numct + 1\n combolist.append(flatlist)\n #print \"TTM DEBUG: \", flatlists\n return combolist", "def naked_twins(values):\n for u in unitlist:\n temp = {k:v for k, v in values.items() if k in u and len(v)==2}\n twins = {};\n \"\"\" generate dict twins like below \n {'23': ['A1', 'C3'],\n '27': ['B2'],\n }\n \"\"\"\n for k, v in temp.items():\n twins.setdefault(v, []).append(k);\n \n # {'24':['A2', 'B3', 'C1']} is not allowed. return False earlier\n if (len([k for k,v in twins.items() if len(v)>2 ])):\n return False;\n \n for k, v in twins.items():\n other_boxes = [ b for b in u if len(v)==2 and b not in v];\n for o in other_boxes:\n for digit in k:\n values[o] = values[o].replace(digit, '');\n\n return values;", "def get_parkey_map(self):\n pkmap = {}\n for selection in self.selections.normal_values():\n for parkey, choices in selection.get_parkey_map().items():\n if parkey not in pkmap:\n pkmap[parkey] = set()\n pkmap[parkey] |= set(choices)\n for parkey, choices in pkmap.items():\n pkmap[parkey] = list(pkmap[parkey])\n if \"CORR\" not in parkey:\n pkmap[parkey].sort()\n return pkmap", "def distinct(self, key):\n return self.database.command({'distinct': self.name,\n 'key': key})['values']", "def _unique(iterable):\n return list(dict.fromkeys(iterable))", "def table_reduction(self):\n answer = Surjection_element(torsion=self.torsion,\n convention='Berger-Fresse')\n for k1, v in self.items():\n d, a = len(k1) - 1, max(k1[0])\n for pi in partitions(d + a, d + 1, ordered=True):\n k2, removed = [], []\n degenerate = False\n for idx, i in enumerate(pi):\n filtered = [i for i in k1[idx]\n if i not in removed]\n if idx > 0 and k2[-1] == filtered[0]:\n degenerate = True\n break\n if i > 1:\n removed += filtered[: i - 1]\n k2 += filtered[: i]\n\n if not degenerate:\n answer += answer.create({tuple(k2): v})\n\n answer._reduce_rep()\n\n return answer", "def get_param_combinations(cls):\n for key, val in cls.param.items():\n if not isinstance(val, (list, Quantity)):\n cls.param[key] = [val]\n elif isinstance(val, Quantity) and val.size == 1:\n try:\n # check if val.value is iterable, e.g. 
a list or a NumPy array\n iter(val.value)\n except:\n cls.param[key] = [val.value] * val.unit\n combos = tuple(dict(zip(cls.param, combo)) for combo in it.product(*cls.param.values()))\n return tuple(c for c in filter(cls._param_validator, combos))", "def keys(self) -> List:\n pass", "def _make_unique_kwarg_list(\n seq: Sequence[Tuple[Any, Any]]\n) -> Sequence[Tuple[Any, Any]]:\n return [\n (pair[0], \"_\".join([pair[1], str(seq[:i].count(pair))]))\n if seq.count(pair) > 1\n else pair\n for i, pair in enumerate(seq)\n ]" ]
[ "0.62023306", "0.6162631", "0.59184194", "0.5876387", "0.5797509", "0.57924527", "0.5776189", "0.57255995", "0.57103395", "0.5674353", "0.5673763", "0.56703126", "0.56365746", "0.5633007", "0.56194043", "0.5603916", "0.55916125", "0.55871516", "0.5582946", "0.55571777", "0.55542487", "0.55502456", "0.55473363", "0.5537427", "0.5525622", "0.5513987", "0.54941213", "0.548784", "0.54801446", "0.5472203", "0.5470267", "0.5461784", "0.5459949", "0.54471874", "0.54452467", "0.54379", "0.5436944", "0.5427909", "0.54181576", "0.5417911", "0.54169315", "0.54132485", "0.54042125", "0.5379922", "0.5370501", "0.53668034", "0.5353355", "0.53522867", "0.53394824", "0.53333336", "0.53277075", "0.5326874", "0.53235024", "0.5318766", "0.5316935", "0.5315398", "0.53081816", "0.5308065", "0.53079575", "0.5305297", "0.52986366", "0.52945805", "0.52943873", "0.529012", "0.5289967", "0.528826", "0.5287075", "0.52849257", "0.5274917", "0.5272107", "0.5266969", "0.5259234", "0.5253927", "0.5251832", "0.5247056", "0.5246578", "0.52403855", "0.5238857", "0.5233829", "0.52247304", "0.52223796", "0.5219355", "0.5214716", "0.52116007", "0.52109", "0.52093655", "0.52041966", "0.519788", "0.5194616", "0.5194496", "0.5193258", "0.5179522", "0.5175587", "0.5174501", "0.5167825", "0.5159649", "0.5156578", "0.5152673", "0.51492953", "0.51472414" ]
0.57542413
7
Return the value for a key at a given index
def item(self, index, key):
    row = self[index]
    if key in row[DEP]:
        return row[DEP][key]
    elif key in row[INDEP]:
        return row[INDEP][key]
    else:
        raise KeyError
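A self-contained sketch of the `item` lookup above, again with assumed placeholder values for the `DEP` and `INDEP` constants and invented sample data:

INDEP, DEP = "indep", "dep"  # assumed placeholder values for the original constants

def item(rows, index, key):
    # Check the dependent-variable dict first, then the independent one.
    row = rows[index]
    if key in row[DEP]:
        return row[DEP][key]
    elif key in row[INDEP]:
        return row[INDEP][key]
    raise KeyError(key)

rows = [{INDEP: {"x": 0.5}, DEP: {"y": 1.5}}]
print(item(rows, 0, "y"))  # -> 1.5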
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value_for_index(self, index):\r\n return self[self.keyOrder[index]]", "def get_value_by_index(self, index):\n return self['value'][index]", "def __getitem__(self, index):\n return self._value_at(index)", "def __getitem__(self, index):\n return self._value_at(index)", "def get_by_index_key(self, index, key=str):\n return str(self.get(key, self.get_all_childname(key)[index]))", "def get_by_index(self, index):\n # makes it easier for callers to just pass in a header value\n index = int(index) if index else 0\n return self.by_index.get(index)", "def py__simple_getitem__(self, index):\n compiled_value_index = compiled.create_simple_object(self.inference_state, index)\n for key, value in self.get_tree_entries():\n for k in self._defining_context.infer_node(key):\n for key_v in k.execute_operation(compiled_value_index, u'=='):\n if key_v.get_safe_value():\n return self._defining_context.infer_node(value)\n raise SimpleGetItemNotFound('No key found in dictionary %s.' % self)", "def __getitem__(self, key):\n ndx = self._findPosition(key)\n assert ndx is not None, 'Invalid map key'\n return self._entryList[ndx].value", "def get_key_at_index(self, index):\n return self.chain_key.subkey(index)", "def get(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n return a[h].val\n else:\n return -1", "def __getitem__(self, index):\n return self.values[index]", "def get(self, index):\n\n return self.values[index]", "def __getitem__(self, index):\n try:\n if isinstance(index, int):\n # the only reliable way is to iterate up to the index:\n return next(islice(self, index, None))\n if isinstance(index, slice):\n return list(islice(self, index.start, index.stop, index.step))\n else:\n key_return = list(self._dictitem_gen(index))\n if self.KEY_ACCESS_REDUCE_SINGLETONS and len(key_return) == 1:\n return key_return[0]\n else:\n return key_return\n except StopIteration:\n raise IndexError(\"list index out of range\")", "def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value", "def get(self, key):\n index = key % self.size\n\n cur = self.bucket[index]\n while cur:\n if cur.key == key:\n return cur.val\n cur = cur.next\n return -1", "def __getitem__(self, key):\n\n return self.values[key]", "def getvalue(self, index):\n self._checkIndex(index)\n return self._items[index].value", "def get(self, key: int) -> int:\n if key in self.keys:\n return self.values[self.keys.index(key)]\n else:\n return -1", "def __getitem__(self, key):\n if self.containsKey(key):\n return self.get(key)\n else:\n raise IndexError()", "def get(self, key):\n hashv = self.hash(key)\n bucket=self.hashmap[hashv]\n for i,(k,v) in enumerate(bucket):\n if k==key:\n return v\n return -1", "def get_d_value(self, key=\"\", index=-1):\n _key = key\n _index = index\n if(_index < 0):\n _index = self.get_index(_key)\n _value = self.values[_index].strip()\n _type = self.types[_index].strip().lower()\n if _type == \"string\" or _type == \"\":\n return _value\n if _value == \"\":\n return 0\n return eval(_type)(_value)", "def GetSubkeyByIndex(self, index):", "def get(self, key):\n i = key //1000\n j = key%1000\n return self.container[i][j]", "def __getitem__(self, key):\n self.__check_key_validity(key)\n return self.data[key[0]][key[1]]", "def __getitem__(self, key):\n return self._dict[key]", "def __getitem__( self, index ) :\n\n return( self.__entries[index] )", "def get(self, key: int) -> int:\n index = key % 10000\n head = self.array[index]\n while head.next:\n head = head.next\n if head.key == key:\n return head.value\n break\n 
return -1", "def get_value(self, key):\n return self[key]", "def __getitem__(self, key):\n return self._get(key)", "def get(key):\n return current().values[key]", "def get(self, key: int) -> int:\n idx = key % self.size\n curr = self.hashmap[idx]\n while curr:\n if curr.key == key:\n return curr.value\n else:\n curr = curr.next\n return -1", "def __getitem__(self, key):\n return self.get(key)", "def __getitem__(self, key):\n return self.get(key)", "def __getitem__(self, key):\n return self.get(key)", "def __getitem__(self, key):\n return self._[key]", "def get(self, k: Any) -> Any:\n i = abs(hash(k)) % self.size\n current = self.data[i]\n while current is not None:\n if current.key == k:\n return current.value\n current = current.next\n return None", "def get(self, key):\r\n for i in range(len(self.lis)):\r\n if self.lis[i][0] == key:\r\n return self.lis[i][1]\r\n \r\n return -1", "def get(self, key):\r\n index = self.hash(key)\r\n l = self.bucket[index]\r\n while l.next:\r\n if l.next.key == key:\r\n return l.next.val\r\n l = l.next\r\n return -1", "def __getitem__(self, key):\n _, kv_pair = self._lookup(key, self._backing)\n if kv_pair:\n return kv_pair.value\n else:\n return Hashmap.absent", "def __getitem__(self, value):\n\n # Select the correct index\n if isinstance(value, six.integer_types):\n idx = self.by_value\n elif isinstance(value, six.string_types):\n idx = self.by_name\n else:\n raise KeyError(value)\n\n # Look up the value in that index\n return idx[value]", "def __getitem__(self, key):\n return self.__values.__getitem__(key)", "def __getitem__(self, index):\n if index >= self.size:\n raise KeyError\n else:\n return self._get_item(index)", "def _value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n return node.value", "def _value_at(self, index):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n return node.value", "def __getitem__(self, key):\n for entry_key, value in self.read(key):\n if entry_key != key:\n raise KeyError(key)\n return value\n raise KeyError(key)", "def _bucket_getitem(self, j, k):\n bucket = self._table[j]\n if bucket is None: # no match found\n raise KeyError(\"Key Error: \" + repr(k))\n return bucket[k]", "def find_value(dic, key):\n return dic[key]", "def get(self, key: int) -> int:\n index = self.hash(key)\n curr = self.map[index]\n\n # Search through list\n while curr:\n # If the value in list matches key, return value\n if curr.val[0] == key: return curr.val[1]\n curr = curr.next\n\n # If it doesn't exist, return -1\n return -1", "def get(self, key: int) -> int:\n idx = key % 1000\n if not self.map[idx]:\n return -1\n else:\n curr = self.map[idx]\n while curr:\n if curr.key == key:\n return curr.val\n curr = curr.next\n return -1", "def get_value(self, key):\n pass", "def get(self, key: int) -> int:\n index = key % self.size\n \n if self.table[index].value is None:\n return -1\n \n p = self.table[index]\n \n while p:\n if p.key == key:\n return p.value\n p = p.next\n return -1", "def __getitem__(self, key):\r\n T=type(key)\r\n if T!=types.IntType and T!=types.LongType:\r\n raise TypeError, \"index must be integer\"\r\n\r\n if key==0: return self.x\r\n elif key==1: return self.y\r\n elif key==2: return self.z\r\n elif key==3: return self.w\r\n else:\r\n raise IndexError,\"index out of range\"", "def __getitem__(self, key):\n\n return self.fvals[key]", "def get(self, index):\n if 0 <= index <= len(self.nums):\n return 
self.nums[index]\n return -1", "def __getitem__(self, idx):\n if idx < 0 or idx >= self.length():\n raise KeyError()\n return self.data[idx]", "def get(self, key):\n index = key % self.size\n curr_node = self.hash_table[index]\n\n while curr_node:\n if curr_node.key == key:\n return curr_node.value\n else:\n curr_node = curr_node.next\n\n return -1", "def get(self, index: int) -> int:\n node = self.get_node(index)\n if node:\n return node.val\n else:\n return -1", "def __getitem__(self, k):\n j = self._hash_function(k)\n return self._bucket_getitem(j, k)", "def get(self, key):\n return self.arr[key]", "def get(self, key: int) -> int:\n hashvalue = key%1000\n if self.hashset[hashvalue]==None:\n return -1\n head = self.hashset[hashvalue]\n \n while head:\n k,v = head.data \n if k==key:\n return v\n \n head = head.next\n return -1", "def get_at_index(self, index: int) -> object:\n return self.data[index]", "def get(self, key):\n if key < self.length:\n return self.buckets[key]\n return -1", "def __getitem__(self,index):\n return self._data[index[0]][index[1]]", "def get(self, index: int) -> int:\n cnt = 0\n cur = self.head \n while cur != None:\n if(cnt == index):\n return cur.val\n cur = cur.next \n cnt += 1\n return -1", "def get(self, key, index=None, recurse=False):\n assert not key.startswith('/')\n params = {}\n if index:\n params['index'] = index\n if recurse:\n params['recurse'] = '1'\n\n def callback(response):\n if response.code == 404:\n data = None\n else:\n data = json.loads(response.body)\n for item in data:\n item['Value'] = base64.b64decode(item['Value'])\n if not recurse:\n data = data[0]\n return response.headers['X-Consul-Index'], data\n\n return self.agent.http.get(\n callback, '/v1/kv/%s' % key, params=params)", "def __getitem__(self, key):\r\n return self.items[bytes(key)]", "def get(self, key: int) -> int:\n t = key % 20011\n for item in self.hash[t]:\n if item[0] == key:\n return item[1]\n return -1", "def get(self, key):\n ha = self.myhash(key)\n if key not in self.hashmap[ha][0]:\n return -1\n else:\n return self.hashmap[ha][1][self.hashmap[ha][0].index(key)]", "def __getitem__(self, key):\n return self._d[key]", "def get_value_at_index(self, index, cc):\n tl = cc.dsget(self.title)\n return (tl[index], None)", "def getKeyValue(self,\n key,\n no = 0):\n keyword = key + \"___\" + str(no + 1)\n return self.__keyObjs[keyword].getValue()", "def __getitem__(self, key: ir.Value) -> ir.Value:\n return ops.MapGet(self, key).to_expr()", "def __getitem__(self, index):\n item = self.data[index]\n return item", "def __getitem__(self, key):\n return self.d[key]", "def __getitem__(self, key):\n return self()[key]", "def get(self, key: int) -> int:\n idx = key % self.size\n if self.mp[idx]:\n for i in range(len(self.mp[idx])):\n if self.mp[idx][i][0] == key:\n return self.mp[idx][i][1] \n return -1\n else:\n return -1", "def __getitem__(self, key):\n return self.__data[key]", "def __getitem__(self, v):\r\n return self.unif.get(v, (v, None))[0]", "def __getitem__(self, index):\n if self.valid_index(index):\n return self._data[index]\n else:\n return IndexError", "def __getitem__(self,idx):\n try:\n return self._cache[idx]\n except:\n pass\n\n try:\n # return full data entry as list\n out = self._data[idx]\n self._cache[idx] = out\n return out\n except:\n try:\n # return data entry with specified key word\n out = self._data[idx[0]][self._header[self._alias[idx[1]]]]\n self._cache[idx] = out\n return out\n except:\n pass", "def __getitem__(self, index):\n return self.data[index]", "def 
__getitem__(self, index):\n return self.data[index]", "def __getitem__(self, key):\n return self.data[key]", "def __getitem__(self, key):\n return self.data[key]", "def __getitem__(self, key):\n return self.data[key]", "def __getitem__(self, key):\n try:\n return self._get_slice(self.data_array, key)\n except KeyError:\n return self.read(bls=key)[0][key]", "def get_item(self, index: int) -> _T:\n return self.index_to_item[index]", "def get(self, index):\n raise NotImplementedError() # pragma: no cover", "def get(self, index):\n self.__validate_index(index)\n return self.__list[index]", "def get(self, key: K) -> Optional[V]:\n return self.mget([key])[0]", "def get(self, key):\n\t\treturn self.__get(key, key[1:])", "def get_value(self, key):\n try:\n return self.map[key]\n except KeyError:\n raise KeyError('key is not in map')", "def __getitem__(self, index):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # ask my tile do the rest\n value = self.data[self.tile.offset(index)]\n # otherwise\n else:\n # retrieve the item directly from my container\n value = self.data[index]\n # all done\n return value", "def get(self, key):", "def get(self, key):", "def __getitem__(self, key):\n return self.params[key].value", "def __getitem__(self, key):\r\n return self.data[key]", "def __getitem__(self, key):\n position = self.hash(key)\n\n for _ in range(self.table_capacity):\n if self.array[position] is None:\n raise KeyError(key)\n elif self.array[position][0] == key:\n return self.array[position][1]\n else:\n position = (position + 1) % self.table_capacity\n raise KeyError(key)", "def get(self, key: int) -> int:\n chain, idx = self._search(key)\n if idx is not None:\n return chain[idx][1]\n return -1", "def get(self, key):\n return self[key]", "def get_value(self, key):\n return self[key]['value']" ]
[ "0.8506093", "0.75719273", "0.73860204", "0.73860204", "0.73637843", "0.72136736", "0.7130539", "0.7129745", "0.7118838", "0.71075356", "0.71021324", "0.7093473", "0.70449865", "0.69830865", "0.69765884", "0.6959366", "0.6956402", "0.6931386", "0.6903598", "0.68861365", "0.68755597", "0.68540126", "0.68286085", "0.680037", "0.67987335", "0.67758256", "0.67750514", "0.6769317", "0.67573833", "0.67392844", "0.67374676", "0.6724357", "0.6724357", "0.6724357", "0.6699793", "0.66926265", "0.66889715", "0.66786766", "0.6672144", "0.6667701", "0.66676575", "0.66646945", "0.6663624", "0.6663624", "0.6652759", "0.6652696", "0.6624027", "0.6622981", "0.6619752", "0.6618054", "0.66134804", "0.66113394", "0.660793", "0.66063684", "0.660347", "0.66029143", "0.6598972", "0.659801", "0.6584341", "0.65793765", "0.656466", "0.65643865", "0.6563587", "0.6561103", "0.655714", "0.6553246", "0.6547174", "0.654284", "0.6524107", "0.65237945", "0.6522827", "0.65089625", "0.65027666", "0.6502551", "0.6498196", "0.6490823", "0.6489367", "0.6482685", "0.6478786", "0.647849", "0.64761394", "0.64761394", "0.6476139", "0.6476139", "0.6476139", "0.64741886", "0.6473867", "0.64644337", "0.6462687", "0.6460466", "0.6458039", "0.6457941", "0.6456207", "0.6454012", "0.6454012", "0.64421713", "0.6441062", "0.6433053", "0.6431174", "0.6425388", "0.64216715" ]
0.0
-1
Allow easier access to data
def __getitem__(self, keys): if isinstance(keys, int): return super().__getitem__(keys) elif isinstance(keys, list): return self.vectors(keys) elif isinstance(keys, str): return self.find(keys) elif isinstance(keys, tuple): return self.item(keys[0], keys[1]) elif isinstance(keys, slice): return super().__getitem__(keys)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self):", "def data(self):", "def get_data(self):\r\n pass", "def get_data():\n pass", "def get_data(self):\n pass", "def get_data(self):\n pass", "def _get_data(self):\n raise NotImplementedError()", "def get_data():\n return", "def fetch_data(self):", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def data(self):\n pass", "def data(self):\n pass", "def _read_data(self):", "def _fetch_data(self):\n pass", "def data(self):\r\n raise NotImplementedError", "def get_objects_data(self):\n pass", "def GetDataAsObject(self):", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def data():\n return None", "def load_data(self):", "def prepare_data(self):", "def data(self, key=None):\n pass # pragma: no cover", "def extract(self, data):", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')", "def getData(self, local_cache):", "def load_data(self) -> None:", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def test_data_object_get_details(self):\n pass", "def data(self):\n return self._data", "def get_data(self):\n\n return super().get_data()", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def readOneData(self):\n\t\tpass", "def data(self):\n\t\treturn vars(self)", "def GetValues(self):", "def get(self):\r\n return self.data", "def data_object(self) -> any:\r\n\r\n return self.__data_object", "def request_data(self):\n pass", "def data(self):\n return self", "def get(self, data):\n pass", "def _get_to_actual_data(raw):\n raise NotImplemented", "def get_details(self):", "def _get_information(self):\n pass", "def retrieve(self):\n pass", "def show_data():", "def _extract_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def __getitem__(self, *args):\n return self.data.__getitem__(*args)", "def data(value):\n return value.data", "def _retrieveCachedData(self):", "def __getitem__(self, key):\n return self.data[key]\n # pass", "def get(self, obj):", "def data(self) -> dict:\n raise NotImplementedError()", "def get_data(self):\n\t\tsample = self.iterator.get_next()\n\n\t\tself.user = sample['user']\n\t\tself.item = sample['item']\n\t\tself.user_feature = sample['user_feature']\n\t\tself.item_feature = sample['item_feature']\n\t\tself.rating = sample['rating']", "def GetValues(self):\n ...", "def GetValues(self):\n ...", "def map_data(self, obj: object):\n pass", "def attribute(self, data, model, model_name):", "def getvalue(self):\n ...", "def getvalue(self):\n ...", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' 
% self.name)", "def __getitem__(self, key):\r\n return self.data[key]", "def get_data(self, name):\r\n return(self.__table__.get(name))", "def getStockData():\n pass", "def get(self):\n return self.data", "def data(self):\n return self.__dict__", "def read_data(self):\n raise NotImplementedError", "def get_data(self):\r\n return self.data.copy()", "def get_data(self):\r\n return self.names", "def values():", "def get_data(self, context):\n # Things to do\n return context", "def access():", "def get_data(self):\n return self.parsed_data", "def data(self) -> dict[str, Any]:\n raise NotImplementedError()", "def _data(self):\n if self._data_ is None:\n self._data_ = {}\n return self._data_", "def __init__(self, dat):\n self.data = dat", "def read(self):", "def __getitem__(self, key):\n return self.data[key]", "def __getitem__(self, key):\n return self.data[key]", "def __getitem__(self, key):\n return self.data[key]", "def load_data(self):\n raise NotImplementedError()", "def Value(self) -> object:", "def Value(self) -> object:", "def test_data_object_vaporise(self):\n pass", "def post_load(self, data):\n return data", "def getInfo():", "def data(self):\n return self.d", "def getData(self):\n return self.data", "def getData(self):\n return self.data", "def getData(self, slice=None):\n\t\traise NotImplementedError", "def getData(language=None):", "def __getitem__(self, name):\n if name in self.data: return self.data[name]", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data" ]
[ "0.787504", "0.77359474", "0.7700366", "0.7674823", "0.7586071", "0.7586071", "0.72750527", "0.72719157", "0.7201379", "0.7178601", "0.7178601", "0.7178601", "0.71383196", "0.71383196", "0.70190066", "0.6975672", "0.69423085", "0.6939048", "0.693423", "0.6889714", "0.672735", "0.6723122", "0.6646133", "0.66299415", "0.66229963", "0.65470636", "0.6457502", "0.64458865", "0.64453185", "0.64453185", "0.64453185", "0.6438144", "0.6436452", "0.642792", "0.638235", "0.6380393", "0.63655394", "0.6343154", "0.63144666", "0.63131994", "0.62932324", "0.6292083", "0.62776625", "0.6275667", "0.62667745", "0.6259537", "0.6223121", "0.6219622", "0.6173387", "0.61583835", "0.6145052", "0.613252", "0.61287785", "0.6126513", "0.60899323", "0.60782915", "0.6076923", "0.6076923", "0.6060034", "0.6053539", "0.6019369", "0.6019369", "0.60145545", "0.60134435", "0.6010794", "0.6004578", "0.5990388", "0.59748805", "0.5971075", "0.59690046", "0.5964314", "0.59616137", "0.5949508", "0.59485036", "0.5948186", "0.5944497", "0.59441584", "0.5943676", "0.59413373", "0.5939282", "0.5939282", "0.5939282", "0.59374106", "0.5925556", "0.5925556", "0.592454", "0.5924305", "0.5914087", "0.5911472", "0.5908087", "0.5908087", "0.5894406", "0.5893075", "0.58841705", "0.58821106", "0.58821106", "0.58821106", "0.58821106", "0.58821106", "0.58821106", "0.58821106" ]
0.0
-1
Format box contents in a concise way
def show(self, lst=None): def f(v): if np.size(v) == 1: return str(v) elif np.size(v) > 3: return str(np.shape(v)) elif np.ndim(v) > 1: return str(np.shape(v)) else: return str(v) def buffer(l, m, n=25): end = len(l) - 1 buffered = [] for i in range(m): if i > end: buffered.append("".ljust(n)) else: buffered.append(l[i].ljust(n)) return buffered lst = self if lst is None else lst out = [IND.ljust(7) + INDEP.ljust(60) + DEP.ljust(60)] for row in lst: ind = [str(row[IND])] dep = [k + ": " + f(v) for k, v in row[DEP].items()] indep = [k + ": " + f(v) for k, v in row[INDEP].items()] m = max(len(dep), len(indep), 1) ind = buffer(ind, m, 7) dep = buffer(dep, m, 60) indep = buffer(indep, m, 60) for a, b, c in zip(ind, indep, dep): out.append(a + b + c) out.append("") return "\n".join(out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_box(box, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin):\n box.box_style = box_style\n box.padding = padding\n box.margin = margin\n if border_visible:\n box.border_color = border_color\n box.border_style = border_style\n box.border_width = border_width\n box.border_radius = border_radius\n else:\n box.border_width = 0", "def _change_box_format(self, boxes):\n boxes[..., 0] = boxes[..., 0]\n boxes[..., 1] = boxes[..., 1]\n boxes[..., 2] = boxes[..., 2]\n boxes[..., 3] = boxes[..., 1] + boxes[..., 3]\n boxes[..., 4] = boxes[..., 2] + boxes[..., 4]\n return boxes", "def display(self):\n width = 1 + max(len(self.values[s]) for s in self.boxes)\n line = 'x'.join(['-'*(width*3)]*3)\n for r in self.rows:\n print(''.join(self.values[r+c].center(width)+('|' if c in '36' else '')\n for c in self.cols))\n if r in 'CF': print(line)\n print", "def display(values):\n width = 1 + max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*width*3]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '') for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\n width = 1 + max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\r\n width = 1 + max(len(values[s]) for s in boxes)\r\n line = '+'.join(['-' * (width * 3)] * 3)\r\n for r in rows:\r\n print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')\r\n for c in cols))\r\n if r in 'CF': print(line)\r\n return", "def display(values):\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return", "def display(values):\n width = 1 + max(len(values[s]) for s in boxes)\n line = '+'.join(['-' * (width * 3)] * 3)\n for r in rows:\n print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')\n for c in cols))\n if r in 'CF':\n print(line)\n return", "def display(values):\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for 
r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n print", "def reformat():\n toolkit.reformat()", "def display(values):\n width = 1 + max(len(values[s]) for s in boxes)\n line = '+'.join(['-' * (width * 3)] * 3)\n for r in rows:\n print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n print", "def display(values):\n width = 1 + max(len(values[s]) for s in boxes)\n line = '+'.join(['-' * (width * 3)] * 3)\n for r in rows:\n print(''.join(values[r + c].center(width) + ('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n print", "def box(message, *style, **kwds):\n if style and style[0] in ('flag', 'box', 'overline', 'underline', 'lined'):\n border = style[0]\n chars = style[1:]\n else:\n border = 'box'\n chars = style\n lines = message.split('\\n')\n width = max([len(re.sub('\\x1b\\[[\\d;]*\\w', '', l)) for l in lines])\n if not chars:\n top = bottom = '-'\n left = right = '|'\n elif len(chars) == 1:\n top = bottom = left = right = chars[0]\n elif len(chars) == 2:\n top = bottom = chars[0]\n left = right = chars[1]\n elif len(chars) == 4:\n top, bottom, left, right = chars\n else:\n raise ScriptionError('if box chars specified, must be a single item for use as all four, two items for use as top/bottom and left/right, or four items')\n # calculate rule now\n rule = '-' * width\n #\n padding = 0\n if border == 'box':\n padding = 1\n width += len(left) + len(right) + 2 * padding\n elif border == 'flag':\n padding = 1\n width += len(left) + 2 * padding\n # make sure right is not used\n right = ''\n else:\n # make sure left and right are not used\n left = right = ''\n #\n times, remainder = divmod(width, len(top))\n top_line = top * times\n if remainder:\n top_line += top[-remainder:]\n #\n times, remainder = divmod(width, len(bottom))\n bottom_line = bottom * times\n if remainder:\n bottom_line += bottom[-remainder:]\n #\n box = []\n padding = padding * ' '\n if border != 'underline':\n box.append(top_line)\n for line in lines:\n if line == '---':\n line = rule\n leading = ('%(left)s%(padding)s%(line)s' %\n {'left': left, 'padding': padding, 'line':line}\n )\n line = '%-*s%s' % (width-len(right), leading, right)\n box.append(line)\n if border != 'overline':\n box.append(bottom_line)\n return '\\n'.join(box)", "def populate_fmt_box(self):\n if not self.caps_desc:\n log.error(\"No caps description available\")\n return\n\n self.format_combo.clear()\n\n self.format_combo.addItems(self.caps_desc.get_fmt_list())", "def alter_text_format(self):\n service = self.slides_service\n requests = [\n {\n 'updateParagraphStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.TITLE_FONT_SIZE, # numbers slightly larger than lyrics\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.left_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n 
}\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.right_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n print(f'Updated the text style for shape with ID: {self.left_box_id}')\n return response", "def __display(self,state: dict):\n width = 1+max(len(state[s]) for s in self.__boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in self.__rows:\n print(''.join( state[r+c].center(width)+ ('|' if c in '36' else '')\n for c in self.__cols))\n if r in 'CF': print(line)", "def format_blocks(self):\n\n block_text = []\n for el, text in self._block_text.items():\n self.soft_break(el, text)\n content = ''.join(text)\n if content:\n block_text.append((content, self.additional_context + self.construct_selector(el)))\n return block_text", "def pretty(sbox, border=True):\n\n p = ''\n if border:\n # List of Columns\n p += ' '\n for i in range(16):\n p += '%02x' % i + ' '\n p += '\\n'\n\n for i in range(52):\n p += '-'\n p += '\\n'\n\n # Row\n for i in range(16):\n if border:\n p += '%02x' % i + ' | '\n\n # Entries\n for j in range(16):\n p += '%02x' % sbox[16*i+j] + ' '\n p += '\\n'\n\n return p.upper()", "def format(self, data):\r\n for name, value in sorted(data.items()):\r\n full_text = ': {name} : {value}'.format(\r\n name=name,\r\n value=value,\r\n )\r\n wrapped_text = textwrap.fill(\r\n full_text,\r\n initial_indent='',\r\n subsequent_indent=' ',\r\n width=self.max_width,\r\n )\r\n yield wrapped_text + '\\n'", "def gt_multi_txt(path, bboxes): \n \n W, H = Image.open(path).size\n\n lines_out=[]\n for obj_info in bboxes:\n label = 0 #obj_info['name']\n xmin, ymin, xmax, ymax = obj_info['bbox']\n\n cx = '%.3f' % np.clip(((xmax+xmin)/2)/W,0,1)\n cy = '%.3f' % np.clip(((ymax+ymin)/2)/H,0,1)\n w = '%.3f' % np.clip((xmax-xmin)/W ,0,1)\n h = '%.3f' % np.clip((ymax-ymin)/H ,0,1)\n\n lines_out.append(' '.join([str(label),cx,cy,w,h,'\\n']))\n\n return lines_out", "def draw_box(stdscr, y, x, height, width, mode=0):\n if mode == 0:\n stdscr.addstr(y, x, \"┌\" + \"─\" * (width - 1) + \"┐\")\n stdscr.addstr(y + height, x, \"└\" + \"─\" * (width - 1) + \"┘\")\n for i in range(y + 1, y + height):\n stdscr.addstr(i, x, \"│\")\n stdscr.addstr(i, x + width, \"│\")\n if mode == 1:\n stdscr.addstr(y, x, \"╭\" + \"─\" * (width - 1) + \"╮\")\n stdscr.addstr(y + height, x, \"╰\" + \"─\" * (width - 1) + \"╯\")\n for i in range(y + 1, y + height):\n stdscr.addstr(i, x, \"│\")\n stdscr.addstr(i, x + width, \"│\")\n if mode == 2:\n stdscr.addstr(y, x, \"╔\" + \"═\" * (width - 1) + \"╗\")\n stdscr.addstr(y + height, x, \"╚\" + \"═\" * (width - 1) + \"╝\")\n for i in range(y + 1, y + height):\n stdscr.addstr(i, x, \"║\")\n stdscr.addstr(i, x + width, \"║\")", "def print_upper_box_line():\n print_indentation()\n print(STYLES[parameters[\"Style\"]][\"Upper left corner\"], end=\"\")\n for _ in range(text_width_with_spaces):\n print(STYLES[parameters[\"Style\"]][\"Horizontal line\"], end=\"\")\n print(STYLES[parameters[\"Style\"]][\"Upper right corner\"])", "def formatting_text_box(ax, parameters, formatting_right):\n\n font_colour = '#9099A2' # Light 
grey\n\n # Set text box to be white with 50% transparency\n # will not be seen unless overlapping data\n text_box_patch = dict(\n boxstyle='round',\n facecolor='white',\n alpha=0.5,\n edgecolor='white')\n\n # Text box position to avoid overlap\n # with graphs data.\n if formatting_right:\n box_vertical = 0.83\n box_horizontal = 0.845\n else:\n box_vertical = 0.83\n box_horizontal = 0.05\n\n ax.text(\n box_horizontal, box_vertical, parameters,\n transform=ax.transAxes, fontsize=12,\n verticalalignment='top', color=font_colour,\n bbox=text_box_patch\n )\n\n return ax", "def print_contents(entry_box):\n contents_of_entry_box = entry_box.get()\n print(contents_of_entry_box)", "def boxTextAt( text = \"\", lboxchar = \" \", rboxchar = \" \", paddingchar = \" \", linewidth = 78 ):\n\n ansistring_text = stringExtends.ansiStringClass( \"\" )\n if isinstance( text, ( str, unicode ) ):\n ansistring_text.Text = text\n\n ansistring_lboxchar = stringExtends.ansiStringClass( default_display_vars.borderChar_Left )\n if isinstance( lboxchar, ( str, unicode ) ):\n ansistring_lboxchar.Text = lboxchar\n\n ansistring_rboxchar = stringExtends.ansiStringClass( default_display_vars.borderChar_Right )\n if isinstance( rboxchar, (str, unicode) ) :\n ansistring_rboxchar.Text = rboxchar\n\n ansistring_paddingchar = stringExtends.ansiStringClass( default_display_vars.boxText_padding )\n if isinstance( paddingchar, (str, unicode) ) :\n ansistring_paddingchar.Text = paddingchar\n\n line_width = 78\n if isinstance( linewidth, ( int, float ) ):\n line_width = linewidth\n\n r = stringExtends.ansiStringClass( '' )\n for line in ansistring_text.ansiTextWrap( line_width - ( ansistring_lboxchar.rawTextLen() + ansistring_rboxchar.rawTextLen() ) ):\n ansistring_line = stringExtends.ansiStringClass( line )\n\n pad_len = line_width - ( ansistring_lboxchar.rawTextLen() + ansistring_rboxchar.rawTextLen() + ansistring_line.rawTextLen() )\n\n this_pad_string = ( ansistring_paddingchar.ansiTextFormat() * int( math.floor( pad_len / ansistring_paddingchar.rawTextLen() ) ) )\n\n r.Text += ansistring_lboxchar.ansiTextFormat() + ansistring_line.ansiTextFormat() + this_pad_string\n if ( r.rawTextLen() + ansistring_rboxchar.ansiTextLen() ) < line_width:\n r.Text += ansistring_paddingchar.ansiSlice( 0, ( line_width - r.rawTextLen() ) - ansistring_rboxchar.ansiTextLen() )\n r.Text += ansistring_rboxchar.ansiTextFormat() + \"\\n\"\n\n r.Text = r.Text[:-1]\n return r.Text", "def box(context, nodelist, classname=None):\n return render_to_string('deco/box.html', {\n 'classname': classname or \"\",\n 'content': nodelist.render(context)\n })", "def _format_text(self, text) :\n text_width = self.width - self.current_indent\n indent = \" \"*self.current_indent\n output_text = []\n paragraphs = text.split('\\n')\n for p in paragraphs :\n output_text.append(textwrap.fill(p,\n text_width,\n initial_indent=indent,\n subsequent_indent=indent))\n return '\\n'.join(output_text)", "def get_formatted_text(self, n_cols):", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.feature_radiobuttons, font_family, font_size,\n font_style, font_weight)\n format_font(self.no_options_widget, 
font_family, font_size, font_style,\n font_weight)\n format_font(self.preview_input_latex, font_family, font_size,\n font_style, font_weight)\n format_font(self.preview_output_latex, font_family, font_size,\n font_style, font_weight)\n format_font(self.preview_time_latex, font_family, font_size, font_style,\n font_weight)\n self.dsift_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.hog_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.igo_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.lbp_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.daisy_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.no_options_widget.margin = '0.2cm'", "def text_box(msg, border='#'):\n assert isinstance(border, str) and len(border) == 1\n msg_line = '{} {} {}'.format(border, msg, border)\n top_bot_line = border * len(msg_line)\n\n sio = StringIO.StringIO()\n print >> sio, top_bot_line\n print >> sio, msg_line\n print >> sio, top_bot_line\n return sio.getvalue()", "def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)", "def print_wrapper(string, item):\n imgtype = item['imgtype']\n obj = item['object']\n\n if len(obj)>=4 and obj[0:4]=='bias':\n # bias images, use dim (2)\n return '\\033[2m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif imgtype=='sci':\n # sci images, use highlights (1)\n return '\\033[1m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif len(obj)>=8 and obj[0:8]=='flatlamp':\n # flat images, analyze nsat\n nsat_1 = item['nsat_1']\n nsat_2 = item['nsat_2']\n nsat_3 = item['nsat_3']\n q95_1 = item['q95_1']\n q95_2 = item['q95_2']\n q95_3 = item['q95_3']\n q_lst = [q95_1 if q95_1 < 6e4 else -1,\n q95_2 if q95_2 < 6e4 else -1,\n q95_3 if q95_3 < 6e4 else -1]\n\n maxccd = np.argmax(q_lst)\n\n if max(q_lst)<0:\n # all CCDs are saturated\n return string\n\n elif 'quartz1' in obj and maxccd == 0:\n # quartz1 for UV, use light magenta (95)\n return '\\033[95m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif maxccd == 0:\n # blue flat, use light blue (94)\n return '\\033[94m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif maxccd == 1:\n # green flat, use light green (92)\n return '\\033[92m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif maxccd == 2:\n # red flat, use light red (91)\n return '\\033[91m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n else:\n # no idea\n return string\n\n elif len(obj)>=7 and obj[0:7]=='arclamp':\n # arc lamp, use light yellow (93)\n return '\\033[93m'+string.replace('\\033[0m', '')+'\\033[0m'\n else:\n return string", "def format_text(self):\n for 
line, _ in enumerate(self.readlines()[:-1]):\n self.root.colour_line(line + 1)", "def the_display(self):\r\n return f\"\"\"\r\n {self.display[0]}\\n\r\n {self.display[1]}\\n\r\n {self.display[2]}\\n\r\n {self.display[3]}\\n\r\n {self.display[4]}\\n\r\n \"\"\"", "def formatall(obj):\n result = \"\"\n if isinstance(obj, list):\n# i = 0\n for obj in obj:\n #printf(\">>> [%d] >>> \", i)\n result += format(obj)\n result += \"\\n\"\n# i += 1\n return result\n if isinstance(obj, dict):\n for key, value in obj.items():\n result += \"%-15s : \" % key\n result += format(value)\n result += \"\\n\"\n return result\n return format(obj)", "def dump(box, stream=None, level=0, indent=\" \", recursive=True): # pragma: no cover\n if stream is None:\n stream = sys.stdout\n stream.write(indent * level)\n stream.write(\"%s.%s\\n\" % (box.__module__, box.__class__.__name__))\n\n for key, value in sorted(box.__dict__.items()):\n if key in [\"children\"]:\n continue\n if key == \"style\":\n stream.write(indent * (level + 1))\n stream.write(\"style:\\n\")\n for pair in sorted(box.style.items()):\n stream.write(indent * (level + 2))\n stream.write(\"%s: %s\\n\" % pair)\n elif key in [\"text\"]:\n stream.write(indent * (level + 1))\n stream.write(\"%s: %r\\n\" % (key, value))\n else:\n stream.write(indent * (level + 1))\n stream.write(\"%s: %s\\n\" % (key, value))\n\n stream.write(\"\\n\")\n\n if recursive and hasattr(box, \"children\"):\n for child in box.children:\n dump(box=child, stream=stream, level=level+1, indent=indent)", "def print_box(*msg, width='console', align='^', start=True, end=True, **kwargs):\n if width == 'content':\n width = max([len(m) for m in msg]) + 2\n elif width == 'console':\n width = get_console_width()\n if start:\n cprint('┏' + '━'*(width - 2) + '┓', **kwargs)\n for m in msg:\n cprint(f'┃{m:{align}{width - 2}}┃', **kwargs)\n if end:\n cprint('┗' + '━'*(width - 2) + '┛', **kwargs)\n return", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n _format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n _format_font(self, font_family, font_size, font_style, font_weight)\n _format_font(self.feature_radiobuttons, font_family, font_size,\n font_style, font_weight)\n _format_font(self.no_options_widget, font_family, font_size, font_style,\n font_weight)\n _format_font(self.preview_input_latex, font_family, font_size,\n font_style, font_weight)\n _format_font(self.preview_output_latex, font_family, font_size,\n font_style, font_weight)\n _format_font(self.preview_time_latex, font_family, font_size,\n font_style, font_weight)\n self.dsift_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.hog_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.igo_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.lbp_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n 
self.daisy_options_widget.style(\n box_style=None, border_visible=False, margin='0.2cm',\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)\n self.no_options_widget.margin = '0.2cm'", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n for i in range(self.n_lines):\n format_font(self.latex_texts[i], font_family, font_size,\n font_style, font_weight)", "def formatted(self) -> str:\r\n ...", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n _format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n _format_font(self, font_family, font_size, font_style, font_weight)\n _format_font(self.file_format_select, font_family, font_size,\n font_style, font_weight)\n _format_font(self.dpi_text, font_family, font_size, font_style,\n font_weight)\n _format_font(self.orientation_dropdown, font_family, font_size,\n font_style, font_weight)\n _format_font(self.papertype_select, font_family, font_size,\n font_style, font_weight)\n _format_font(self.transparent_checkbox, font_family, font_size,\n font_style, font_weight)\n _format_font(self.pad_inches_text, font_family, font_size, font_style,\n font_weight)\n _format_font(self.filename_text, font_family, font_size, font_style,\n font_weight)\n _format_font(self.overwrite_checkbox, font_family, font_size,\n font_style, font_weight)\n _format_font(self.save_button, font_family, font_size, font_style,\n font_weight)\n self.facecolour_widget.style(\n box_style=None, border_visible=False, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)\n self.edgecolour_widget.style(\n box_style=None, border_visible=False, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)", "def msg(text):\n for line in text.splitlines():\n if JS.alignment == \"left\":\n print(demarkup(line))\n elif JS.alignment == \"center\":\n print(demarkup(line).center(get_terminal_size()[0] - 1))\n else:\n print(demarkup(line).rjust(get_terminal_size()[0] - 1))", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight=''):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.file_format_select, font_family, font_size, font_style,\n font_weight)\n format_font(self.dpi_text, font_family, font_size, font_style,\n font_weight)\n format_font(self.orientation_dropdown, font_family, font_size,\n font_style, font_weight)\n format_font(self.papertype_select, font_family, font_size, font_style,\n font_weight)\n format_font(self.transparent_checkbox, font_family, font_size,\n font_style, font_weight)\n format_font(self.pad_inches_text, font_family, font_size, font_style,\n 
font_weight)\n format_font(self.filename_text, font_family, font_size, font_style,\n font_weight)\n format_font(self.overwrite_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.save_button, font_family, font_size, font_style,\n font_weight)\n self.facecolour_widget.style(\n box_style=None, border_visible=False, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)\n self.edgecolour_widget.style(\n box_style=None, border_visible=False, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)", "def __format_display(file_contents: str) -> str:\n\n new_file_contents = file_contents\n\n for match in re.finditer(COBOL_FORMAT_DISPLAY_REGEX, file_contents):\n match_str = match_to_str(match)\n\n # Skip \"DISPLAY\" statements within \"IF-ELSE\" blocks\n if re.search(re.compile(r'\\s+ELSE\\s+'), match_str) is not None:\n continue\n\n new_str = match_str.replace('\\n', ' ')\n new_file_contents = new_file_contents.replace(match_str, new_str)\n\n return new_file_contents", "def reformat(ctx):\n pass", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def _formatarCorpo(self, corpo, editor=None):\n if not corpo:\n corpo = \"\"\n if not editor:\n corpo = corpo.replace(\"\\n\", \"<br/>\")\n return corpo", "def _format(self, composite, level=0):\n lines = []\n if isinstance(composite, dict) and '__type__' in composite.keys():\n type_ = composite['__type__']\n assert(type_ in COMPOSITE_NAMES.union(SINGLETON_COMPOSITE_NAMES))\n is_hidden = False\n s = self.whitespace(level, 0) + type_ .upper() \n lines.append(s)\n # we can now filter out the type property so the rest of the \n # values are displayed\n items = ((k, v) for k, v in composite.items() if k != '__type__')\n else:\n # hidden container\n assert(len(composite.keys()) == 1)\n is_hidden = True\n items = enumerate(composite.values()[0])\n\n for key, value in items:\n \n if self.is_hidden_container(key, value): # HiddenContainer\n # now recursively print all the items in the container\n if self.is_block_list(value):\n k = self.singular(key)\n lines += self.process_list(k, value, level)\n else:\n for v in value:\n lines += self._format(v, level + 1)\n\n elif self.is_composite(value): # Container\n lines += self._format(value, level + 1)\n else:\n if key in SINGLETON_COMPOSITE_NAMES: \n lines += self.process_dict(key, value, level)\n elif isinstance(value, dict): \n\n if key == \"config\":\n # config declaration allows for pairs of values\n value = [\"%s %s\" % (self.format_key(k), self.format_attribute(v)) for k,v in value.items()]\n key = self.format_key(key) # format the \"parent\" key\n for v in value:\n # keys and values are already formatted so do not format them again \n lines.append(self.__format_line(self.whitespace(level, 1), key, v))\n elif isinstance(value, list): \n if self.is_list_of_lists(value):\n # value is list of lists, so create composite type for each list e.g. 
several POINTS in a FEATURE\n for l in value:\n lines += self.process_list(key, [l], level)\n else:\n lines += self.process_list(key, value, level)\n else:\n comp_type = composite.get(\"__type__\", \"\") \n if comp_type == \"metadata\":\n # don't add quotes to key or value, but standardise them if present\n key = self.standardise_quotes(key)\n value = self.standardise_quotes(value)\n lines.append(self.__format_line(self.whitespace(level, 1), key, value))\n else:\n lines.append(self.format_line(self.whitespace(level, 1), key, value))\n\n if not is_hidden: # Container\n # close the container block with an END \n s = self.whitespace(level, 0) + self.end\n lines.append(s)\n\n return lines", "def print_wrapper(string, item):\n imgtype = item['imgtype']\n objectname = item['object'].strip().lower()\n\n if imgtype=='cal' and objectname=='bias':\n # bias images, use dim (2)\n return '\\033[2m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif imgtype=='sci':\n # sci images, use highlights (1)\n return '\\033[1m'+string.replace('\\033[0m', '')+'\\033[0m'\n\n elif imgtype=='cal':\n if objectname == 'thar':\n # arc lamp, use light yellow (93)\n return '\\033[93m'+string.replace('\\033[0m', '')+'\\033[0m'\n else:\n return string\n #elif (item['fiber_A'], item['fiber_B']) in [('ThAr', ''),\n # ('', 'ThAr'),\n # ('ThAr', 'ThAr')]:\n # # arc lamp, use light yellow (93)\n # return '\\033[93m'+string.replace('\\033[0m', '')+'\\033[0m'\n #else:\n # return string\n else:\n return string", "def _format_line_for_sidebar(self, i):\n check = '\\u2714 ' if self.model.line_has_audio(i) else ' '\n return [('check', check), \" {}. {}\".format(i, self.model.get_lines()[i])]", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='dashed', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight='', bboxes_box_style=None,\n bboxes_border_visible=False, bboxes_border_colour='black',\n bboxes_border_style='solid', bboxes_border_width=1,\n bboxes_border_radius=0, bboxes_padding=0, bboxes_margin=0,\n patches_box_style=None, patches_border_visible=False,\n patches_border_colour='black', patches_border_style='solid',\n patches_border_width=1, patches_border_radius=0,\n patches_padding=0, patches_margin=0):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.offset_dropdown, font_family, font_size, font_style,\n font_weight)\n format_font(self.render_patches_checkbox, font_family, font_size,\n font_style, font_weight)\n format_font(self.render_centers_checkbox, font_family, font_size,\n font_style, font_weight)\n format_font(self.background_toggle, font_family, font_size,\n font_style, font_weight)\n format_font(self.background_title, font_family, font_size,\n font_style, font_weight)\n self.bboxes_line_options_wid.style(\n box_style=bboxes_box_style, border_visible=bboxes_border_visible,\n border_colour=bboxes_border_colour, border_style=bboxes_border_style,\n border_width=bboxes_border_width,\n border_radius=bboxes_border_radius, padding=bboxes_padding,\n margin=bboxes_margin, font_family=font_family, font_size=font_size,\n font_style=font_style, font_weight=font_weight)\n self.slicing_wid.style(\n box_style=patches_box_style, text_box_style=None,\n text_box_background_colour=None, text_box_width=None,\n font_family=font_family, font_size=font_size, 
font_style=font_style,\n font_weight=font_weight)\n format_box(self.offset_patches_box, box_style=patches_box_style,\n border_visible=patches_border_visible,\n border_colour=patches_border_colour,\n border_style=patches_border_style,\n border_width=patches_border_width,\n border_radius=patches_border_radius,\n padding=patches_padding, margin=patches_margin)", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight='', slider_width='', slider_handle_colour=None,\n slider_bar_colour=None, buttons_style=''):\n format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.reset_button, font_family, font_size, font_style,\n font_weight)\n format_font(self.plot_button, font_family, font_size, font_style,\n font_weight)\n if self.mode == 'single':\n format_slider(self.slider, slider_width=slider_width,\n slider_handle_colour=slider_handle_colour,\n slider_bar_colour=slider_bar_colour,\n slider_text_visible=True)\n format_font(self.slider, font_family, font_size, font_style,\n font_weight)\n format_font(self.dropdown_params, font_family, font_size,\n font_style, font_weight)\n else:\n for sl in self.sliders:\n format_slider(sl, slider_width=slider_width,\n slider_handle_colour=slider_handle_colour,\n slider_bar_colour=slider_bar_colour,\n slider_text_visible=True)\n format_font(sl, font_family, font_size, font_style,\n font_weight)\n self.reset_button.button_style = buttons_style\n self.plot_button.button_style = buttons_style", "def format(self):\n ...", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight='', slider_width='', slider_colour=None):\n _format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n self.single_slider.width = slider_width\n self.multiple_slider.width = slider_width\n _format_font(self, font_family, font_size, font_style, font_weight)\n _format_font(self.mode_radiobuttons, font_family, font_size, font_style,\n font_weight)\n _format_font(self.single_slider, font_family, font_size, font_style,\n font_weight)\n _format_font(self.multiple_slider, font_family, font_size, font_style,\n font_weight)\n _format_font(self.masked_checkbox, font_family, font_size, font_style,\n font_weight)\n _format_font(self.rgb_checkbox, font_family, font_size, font_style,\n font_weight)\n _format_font(self.sum_checkbox, font_family, font_size, font_style,\n font_weight)\n _format_font(self.glyph_checkbox, font_family, font_size, font_style,\n font_weight)\n _format_font(self.glyph_use_negative_checkbox, font_family, font_size,\n font_style, font_weight)\n _format_font(self.glyph_block_size_text, font_family, font_size,\n font_style, font_weight)\n self.single_slider.slider_color = slider_colour\n self.single_slider.background_color = slider_colour\n self.multiple_slider.slider_color = slider_colour\n self.multiple_slider.background_color = slider_colour", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n 
font_weight=''):\n _format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n _format_font(self, font_family, font_size, font_style, font_weight)\n for i in range(self.n_lines):\n _format_font(self.latex_texts[i], font_family, font_size,\n font_style, font_weight)", "def __str__(self):\n boxStrings = [str(box) for box in self.boxes]\n connectionStrings = [str(connection) for connection in self.connections]\n\n return '%s\\n{\\n %s\\n\\n %s\\n}' % (\n self.name,\n '\\n '.join(boxStrings),\n '\\n '.join(connectionStrings))", "def style(self, box_style=None, border_visible=False, border_colour='black',\n border_style='solid', border_width=1, border_radius=0, padding=0,\n margin=0, font_family='', font_size=None, font_style='',\n font_weight='', slider_width='4cm'):\n format_box(self, box_style, border_visible, border_colour, border_style,\n border_width, border_radius, padding, margin)\n self.channels_wid.single_slider.width = slider_width\n self.channels_wid.multiple_slider.width = slider_width\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.rgb_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.masked_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.sum_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.glyph_checkbox, font_family, font_size, font_style,\n font_weight)\n format_font(self.glyph_block_size_text, font_family, font_size,\n font_style, font_weight)\n format_font(self.glyph_use_negative_checkbox, font_family, font_size,\n font_style, font_weight)\n format_font(self.no_options_latex, font_family, font_size, font_style,\n font_weight)\n self.channels_wid.style(\n box_style=box_style, border_visible=False, text_box_style=None,\n text_box_background_colour=None, text_box_width=None,\n font_family=font_family, font_size=font_size, font_style=font_style,\n font_weight=font_weight)", "def build_formatting_model(node, root=None):\n if node.tag == \"body\":\n root = Layout(node.get(\"style\"))\n\n if node.tag in [\"body\", \"b\", \"code\", \"i\", \"em\", \"small\", \"span\", \"strong\", \"sub\", \"sup\"]:\n if node.text:\n root.children.append(TextBox(node.text, node.get(\"style\")))\n for child in node:\n build_formatting_model(child, root)\n if child.tail:\n root.children.append(TextBox(child.tail, node.get(\"style\"))) # Note: the tail doesn't get the child's style\n return root\n\n if node.tag == \"a\":\n root.children.append(PushHyperlink(node.get(\"href\"), node.get(\"target\", None), node.get(\"style\")))\n if node.text:\n root.children.append(TextBox(node.text, node.get(\"style\")))\n for child in node:\n build_formatting_model(child, root)\n if child.tail:\n root.children.append(TextBox(child.tail, node.get(\"style\"))) # Note: the tail doesn't get the child's style\n root.children.append(PopHyperlink(node.get(\"style\")))\n return root\n\n if node.tag == \"marker\":\n root.children.append(MarkerBox(toyplot.marker.from_html(node), node.get(\"style\")))\n return root\n\n if node.tag == \"br\":\n root.children.append(_LineBreak())\n return root\n\n raise ValueError(\"Unknown tag: %s\" % node.tag) # pragma: no cover", "def _just_a_box():\n {\n \"pgf\": {\n \"texsystem\": \"\", \n \"debug\": \"\", \n \"rcfonts\": \"\", \n \"preamble\": \"\"\n }, \n \"verbose\": {\n \"level\": \"\", \n \"fileo\": \"\"\n }, \n \"figure\": {\n \"facecolor\": \"\", \n \"titlesize\": \"\", \n \"titleweight\": \"\", 
\n \"figsize\": \"\", \n \"max_open_warning\": \"\", \n \"edgecolor\": \"\", \n \"dpi\": \"\", \n \"frameon\": \"\", \n \"autolayout\": \"\"\n }, \n \"savefig\": {\n \"transparent\": \"\", \n \"facecolor\": \"\", \n \"pad_inches\": \"\", \n \"orientation\": \"\", \n \"format\": \"\", \n \"jpeg_quality\": \"\", \n \"directory\": \"\", \n \"edgecolor\": \"\", \n \"dpi\": \"\", \n \"frameon\": \"\", \n \"bbox\": \"\"\n }, \n \"text\": {\n \"color\": \"\", \n \"antialiased\": \"\", \n \"hinting\": \"\", \n \"hinting_factor\": \"\", \n \"usetex\": \"\"\n }, \n \"image\": {\n \"resample\": \"\", \n \"cmap\": \"\", \n \"composite_image\": \"\", \n \"interpolation\": \"\", \n \"lut\": \"\", \n \"aspect\": \"\", \n \"origin\": \"\"\n }, \n \"examples\": {\n \"directory\": \"\"\n }, \n \"axes3d\": {\n \"grid\": \"\"\n }, \n \"font\": {\n \"fantasy\": \"\", \n \"monospace\": \"\", \n \"weight\": \"\", \n \"serif\": \"\", \n \"family\": \"\", \n \"stretch\": \"\", \n \"variant\": \"\", \n \"cursive\": \"\", \n \"style\": \"\", \n \"size\": \"\"\n }, \n \"contour\": {\n \"corner_mask\": \"\", \n \"negative_linestyle\": \"\"\n }, \n \"backend\": {\n \"qt4\": \"\", \n \"qt5\": \"\"\n }, \n \"ps\": {\n \"useafm\": \"\", \n \"papersize\": \"\", \n \"usedistiller\": \"\", \n \"fonttype\": \"\"\n }, \n \"axes\": {\n \"labelweight\": \"\", \n \"facecolor\": \"\", \n \"axisbelow\": \"\", \n \"titlesize\": \"\", \n \"titleweight\": \"\", \n \"labelpad\": \"\", \n \"prop_cycle\": \"\", \n \"ymargin\": \"\", \n \"labelcolor\": \"\", \n \"unicode_minus\": \"\", \n \"hold\": \"\", \n \"autolimit_mode\": \"\", \n \"linewidth\": \"\", \n \"xmargin\": \"\", \n \"edgecolor\": \"\", \n \"titlepad\": \"\", \n \"labelsize\": \"\", \n \"grid\": \"\"\n }, \n \"markers\": {\n \"fillstyle\": \"\"\n }, \n \"hist\": {\n \"bins\": \"\"\n }, \n \"polaraxes\": {\n \"grid\": \"\"\n }, \n \"animation\": {\n \"convert_path\": \"\", \n \"frame_format\": \"\", \n \"embed_limit\": \"\", \n \"html\": \"\", \n \"html_args\": \"\", \n \"avconv_args\": \"\", \n \"codec\": \"\", \n \"bitrate\": \"\", \n \"ffmpeg_args\": \"\", \n \"ffmpeg_path\": \"\", \n \"convert_args\": \"\", \n \"writer\": \"\", \n \"avconv_path\": \"\"\n }, \n \"tk\": {\n \"window_focus\": \"\"\n }, \n \"hatch\": {\n \"color\": \"\", \n \"linewidth\": \"\"\n }, \n \"boxplot\": {\n \"bootstrap\": \"\", \n \"patchartist\": \"\", \n \"meanline\": \"\", \n \"vertical\": \"\", \n \"showfliers\": \"\", \n \"showbox\": \"\", \n \"notch\": \"\", \n \"showmeans\": \"\", \n \"whiskers\": \"\", \n \"showcaps\": \"\"\n }, \n \"docstring\": {\n \"hardcopy\": \"\"\n }, \n \"errorbar\": {\n \"capsize\": \"\"\n }, \n \"xtick\": {\n \"direction\": \"\", \n \"labelbottom\": \"\", \n \"alignment\": \"\", \n \"labeltop\": \"\", \n \"color\": \"\", \n \"bottom\": \"\", \n \"top\": \"\", \n \"labelsize\": \"\"\n }, \n \"ytick\": {\n \"direction\": \"\", \n \"right\": \"\", \n \"alignment\": \"\", \n \"color\": \"\", \n \"labelright\": \"\", \n \"labelleft\": \"\", \n \"left\": \"\", \n \"labelsize\": \"\"\n }, \n \"grid\": {\n \"alpha\": \"\", \n \"color\": \"\", \n \"linewidth\": \"\", \n \"linestyle\": \"\"\n }, \n \"mathtext\": {\n \"it\": \"\", \n \"fontset\": \"\", \n \"default\": \"\", \n \"tt\": \"\", \n \"cal\": \"\", \n \"sf\": \"\", \n \"bf\": \"\", \n \"rm\": \"\", \n \"fallback_to_cm\": \"\"\n }, \n \"path\": {\n \"simplify\": \"\", \n \"sketch\": \"\", \n \"snap\": \"\", \n \"effects\": \"\", \n \"simplify_threshold\": \"\"\n }, \n \"legend\": {\n \"shadow\": \"\", \n 
\"facecolor\": \"\", \n \"markerscale\": \"\", \n \"loc\": \"\", \n \"handleheight\": \"\", \n \"borderaxespad\": \"\", \n \"scatterpoints\": \"\", \n \"numpoints\": \"\", \n \"framealpha\": \"\", \n \"columnspacing\": \"\", \n \"handlelength\": \"\", \n \"fontsize\": \"\", \n \"edgecolor\": \"\", \n \"labelspacing\": \"\", \n \"frameon\": \"\", \n \"fancybox\": \"\", \n \"handletextpad\": \"\", \n \"borderpad\": \"\"\n }, \n \"svg\": {\n \"hashsalt\": \"\", \n \"image_inline\": \"\", \n \"fonttype\": \"\"\n }, \n \"lines\": {\n \"solid_capstyle\": \"\", \n \"markersize\": \"\", \n \"antialiased\": \"\", \n \"dotted_pattern\": \"\", \n \"scale_dashes\": \"\", \n \"solid_joinstyle\": \"\", \n \"color\": \"\", \n \"dashdot_pattern\": \"\", \n \"markeredgewidth\": \"\", \n \"dashed_pattern\": \"\", \n \"linewidth\": \"\", \n \"marker\": \"\", \n \"dash_joinstyle\": \"\", \n \"dash_capstyle\": \"\", \n \"linestyle\": \"\"\n }, \n \"patch\": {\n \"edgecolor\": \"\", \n \"antialiased\": \"\", \n \"facecolor\": \"\", \n \"linewidth\": \"\", \n \"force_edgecolor\": \"\"\n }, \n \"keymap\": {\n \"fullscreen\": \"\", \n \"quit\": \"\", \n \"grid_minor\": \"\", \n \"all_axes\": \"\", \n \"yscale\": \"\", \n \"quit_all\": \"\", \n \"save\": \"\", \n \"back\": \"\", \n \"zoom\": \"\", \n \"xscale\": \"\", \n \"home\": \"\", \n \"pan\": \"\", \n \"forward\": \"\", \n \"grid\": \"\"\n }, \n \"webagg\": {\n \"port_retries\": \"\", \n \"address\": \"\", \n \"open_in_browser\": \"\", \n \"port\": \"\"\n }, \n \"pdf\": {\n \"use14corefonts\": \"\", \n \"compression\": \"\", \n \"inheritcolor\": \"\", \n \"fonttype\": \"\"\n }, \n \"scatter\": {\n \"marker\": \"\"\n }\n }", "def display(self):\n lines = []\n for y in range(1, self.height+1):\n line = [\".\"] * self.width\n for x in range(1, self.width+1):\n if self.array[y][x]:\n line[x-1] = \"#\"\n lines.append(\"\".join(line))\n return \"\\n\".join(lines)", "def prettyPrint(description, ip_comp, host, width):\n value = (len(ip_comp) + len(host))\n #When printing values wider than the second column, split and print them\n if value > (int(width/3)):\n print(\"| \" + description.ljust(int(width/3)) + \" |\" ), \n i=0\n wrapped=textwrap.wrap(value, 60) \n for loop in wrapped:\n print(\"Fail point 3 inside loop\")\n if i == 0:\n print(loop + \"|\".rjust(int(width/3-(len(loop)))))\n else: \n print(\"| \".ljust(int(width/3+3)) + \" | \" + loop + \"|\".rjust(int(width/3-(len(loop)))))\n i=i+1\n else: \n print( \"| \" + description.ljust(int(width/3)) + \" | \" + ip_comp.rjust(int(width/3-6)) + \" | \" + host.rjust(int(width/3+2)) + \"|\")", "def format(prefix, core, sufix, crop, color):\n if color:\n txtgrn = '\\x1b[32m'\n txtrst = '\\x1b[0m'\n else:\n txtgrn = ''\n txtrst = ''\n if len(prefix + core + sufix) <= 50 or not crop:\n return prefix + txtgrn + core + txtrst + sufix\n left = 50\n left -= len(core)\n each = left / 4\n if len(prefix) >= each * 2:\n prefix = prefix[:each] + ' ... ' + prefix[-each:]\n if len(sufix) >= each * 2:\n sufix = sufix[:each] + ' ... 
' + sufix[-each:]\n return prefix + txtgrn + core + txtrst + sufix", "def display(values):\n \n print(\"Displaying board\")\n for idx, unit_row in enumerate(unit_rows):\n row_string = \"\"\n for idx2, box in enumerate(unit_row):\n row_string += values[box]\n row_string += (9 - len(values[box])) * ' ' + ' '\n if ((idx2 + 1) % 3 == 0 and idx2 + 1 != 9):\n row_string += ' | '\n print('\\n' + row_string)\n if ((idx + 1) % 3 == 0 and idx + 1 != 9):\n print('\\n' + '-' * len(row_string))", "def draw(self):\n # 5 is the number of characters per box add one for the header column\n sepreator_line = \"-\" * (len(self.letters) + 1) * 5 + \"-\"\n print(sepreator_line)\n print(\n \"| \" + \"\".join([f\"| {letter} \" for letter in self.letters]) + \"|\")\n print(sepreator_line)\n for number in self.numbers:\n print(f\"| {number} \" + \"\".join(\n [f\"| {self.positions[letter + number]} \" for letter in self.letters]) + \"|\")\n print(sepreator_line)", "def __str__(self):\n inside_list = lambda _v, _h, a: any(x == _h and y == _v for y, x in a)\n resultant = ''\n for _v in range(1, self.size_v + 1):\n for _h in range(1, self.size_h + 1):\n if self.current_location[1] == _h and self.current_location[0] == _v:\n resultant = resultant + '@'\n elif inside_list(_v, _h, self.boxes):\n resultant = resultant + '$'\n elif inside_list(_v, _h, self.storage_locations):\n resultant = resultant + '.'\n elif inside_list(_v, _h, self.wall_squares):\n resultant = resultant + '#'\n else:\n resultant = resultant + ' '\n resultant = resultant + '\\n'\n\n return resultant", "def draw_boxes_info(image, current_data):\n\n font_position1 = (50, 600)\n font_position2 = (50, 650)\n font_scale = .4\n font_thickness = 1\n\n locations = current_data[\"locations\"] #returns x1, y1, x2, y2\n frame_num = \"Frame Number: \" + str(current_data[\"frame_num\"])\n\n for box in locations:\n box_text = (\"Box locations are x1: {0}, y1: {1}, x2: {2}, y2: {3}\").format(box[1],box[3],box[0],box[2])\n\n cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 3)\n cv2.putText(image, box_text, font_position1, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n cv2.putText(image, frame_num, font_position2, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n return image", "def blockprint(content, width=TERMINAL_CHARS):\n\n lines = content.split('\\n')\n print('_'*width)\n print('')\n for line in lines:\n p = line.strip()\n print(\"| \" + p)\n print('_'*width)", "def parse_obj(lt_objs,content):\n\n # loop over the object list\n\n\n for obj in lt_objs:\n\n # if it's a textbox, print text and location\n if isinstance(obj, pdfminer.layout.LTRect):\n content[0].append(int(obj.x0))\n content[0].append(int(obj.x1))\n content[1].append(int(obj.y1))\n content[1].append(int(obj.y0))", "def highlight_box(image, box, color = colors[0], width = defaultwidth,\n font = None):\n draw = ImageDraw.Draw(image)\n if not box.occluded:\n width = width * 2\n for i in range(width):\n draw.rectangle((box[0] + i, box[1] + i, box[2] - i, box[3] - i),\n outline=color)\n if font:\n ypos = box.ytl\n for attribute in box.attributes:\n attribute = str(attribute)\n size = draw.textsize(attribute, font = font)\n xpos = max(box.xtl - size[0] - 3, 0)\n\n draw.text((xpos, ypos+1), attribute,\n fill=\"black\", font=font)\n draw.text((xpos+1, ypos+1), attribute,\n fill=\"black\", font=font)\n draw.text((xpos+1, ypos), attribute,\n fill=\"black\", font=font)\n draw.text((xpos, ypos-1), attribute,\n 
fill=\"black\", font=font)\n draw.text((xpos-1, ypos-1), attribute,\n fill=\"black\", font=font)\n draw.text((xpos-1, ypos), attribute,\n fill=\"black\", font=font)\n\n draw.text((xpos, ypos), attribute,\n fill=\"white\", font=font)\n ypos += size[1] + 3\n return image", "def pformat(self, tree):\n return str(self.to_tree_text_block(tree))", "def _render_thing(self, thing):\n function = \"{:}\".format\n if (type(thing) in self.fmatdict):\n function = self.fmatdict[type(thing)]\n return function(thing).strip()", "def __makeFormatString(self):\n self.__formatString = \"\"\n for f in self.__columns:\n self.__formatString += \"%(\"+ f + \")-\" + str(self.__widths[f]) + \\\n \"s \"", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def build_figlet_text(self, text):\n\n self.figlet_text = self.execute_shell([\"figlet\", \"-f\", self.font, '-w',\n str(self.geometry['x']), text])\n\n temp = self.figlet_text.split(\"\\n\")\n if len(temp) > 0:\n self.figlet_geometry['x'] = max([len(x) for x in temp])\n self.figlet_geometry['y'] = len(temp)\n\n # fix trailing spaces\n new_text = []\n for l in temp:\n if len(l) < self.figlet_geometry['x']:\n new_text.append(\n l + \" \" * (self.figlet_geometry['x'] - len(l)))\n else:\n new_text.append(l)\n self.figlet_text = \"\\n\".join(new_text)", "def cli(boxes, show_all):\n\n box_status = []\n for project, project_boxes in boxes.iteritems():\n for box in project_boxes:\n # add some nice colors to box status\n status = box.status()\n if not show_all and status == 'not created':\n continue\n color_status = {\n 'running': click.style('running', fg='green'),\n 'saved': click.style('saved', fg='blue'),\n 'poweroff': click.style('powered off', fg='yellow'),\n 'not created': click.style('not created', fg='red'),\n }.get(status, status)\n\n box_status.append({\n 'project': project.name(),\n 'name': box.name(),\n 'image': box.image(),\n 'status': color_status\n })\n\n box_status = sorted(box_status, key=_status_sort)\n status_table.echo(box_status)", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def displayField(self):\n print(\"Field :\")\n for i in range(len(self.field)):\n currentSuit = Suit(i + 1)\n print(Bcolor.BOLD + Suit.toColor(currentSuit) + \"\\t\" + str(currentSuit), self.field[i], end=\"\\t\" + Bcolor.END)\n print()", "def render(self, mode: str = 'human'):\n # Note, this may seem badly aligned but it is correct!\n # It becomes aligned after substituting the variables by their real value\n content = \"\"\"\n x1 = {x1:.2f}\n ------------> City\n / x5 = {x5:.2f}\n / x2 = {x2:.2f} ----------------> Farms\n Q1 = {Q1} --------------> Dam ---------------- /\n S = {S} | /\n |-------------------------> Lower tributary\n | x6 = {x6:.2f}\n Q2 = {Q2} ------------------------------------\n \\\\ x3 = {x3:.2f}\n \\\\\n 
-------------> Farms\n x4 = {x4:.2f}\n \"\"\"\n print(\n content.format(\n Q1=self.Q1,\n Q2=self.Q2,\n S=self.S,\n x1=self.x[0],\n x2=self.x[1],\n x3=self.x[2],\n x4=self.x[3],\n x5=self.x[4],\n x6=self.x[5]\n )\n )", "def render(self):\n start_tag = format_html('<div {}>', mark_safe(' '.join(self.field_attrs)))\n output = [start_tag]\n for widget in self:\n output.append(force_text(widget))\n output.append('</div>')\n return mark_safe('\\n'.join(output))", "def display(field):\n side = int(math.sqrt(len(field))) # in number of elements (tiles)\n \n def pos():\n cy, cx = win.getyx()\n stdscr.addstr(0, 0, \"cy: \"+str(cy)+\", cx: \"+str(cx))\n\n def br():\n while True:\n c = stdscr.getch()\n if c == curses.KEY_RIGHT:\n break\n win.refresh()\n\n win.addstr(0, 0, '┏')\n for _ in range(side-1):\n win.addstr('━━━━━━')\n win.addstr('┳')\n win.addstr('━━━━━━')\n win.addstr('┓ ')\n\n for y in range(side):\n \n win.addstr('┃')\n for x in range(side):\n #stdscr.addstr(0, 0, \"side: \" + str(x))\n idx = y * side + x\n if field[idx] == 0:\n win.addstr(' '.center(6))\n else:\n n = field[idx]\n color = curses.color_pair(0)\n if n < 0:\n field[idx] *= -1\n n = field[idx]\n color = curses.A_BOLD | curses.A_STANDOUT\n elif n == 4:\n color = curses.color_pair(3)\n elif n == 8:\n color = curses.color_pair(4)\n elif n >= 16:\n color = curses.color_pair(1)\n \n #win.addstr(str(n).center(6), color)\n \n n = str(n)\n left = (6-len(n)) // 2\n right = 6 - (left + len(n))\n win.addstr(left*' ')\n win.addstr(n, color)\n win.addstr(right*' ')\n\n \n win.addstr('┃')\n win.addstr(' ')\n if y == side-1:\n break\n else: \n win.addstr('┣')\n for _ in range(side-1):\n win.addstr('━━━━━━')\n win.addstr('╋')\n win.addstr('━━━━━━')\n win.addstr('┫ ')\n \n win.addstr('┗')\n for _ in range(side-1):\n win.addstr('━━━━━━')\n win.addstr('┻')\n win.addstr('━━━━━━')\n #pos()\n #br()\n win.addstr('┛')\n\n #win.border()\n win.refresh()", "def __repr__(self):\n returnvalue = str()\n itemwidth = self._maxValueLength()\n for i in range(self._height):\n if i:\n returnvalue += '\\n'\n returnvalue += '['\n for j in range(self._width):\n if type(self._value[i][j]) is float:\n formatstring = \" %%%d.3f \" % itemwidth\n else:\n formatstring = \" %%%ds \" % itemwidth\n returnvalue += (formatstring % self._value[i][j])\n returnvalue += ']'\n return returnvalue", "def __str__(self):\n out = ' '\n for j in range(self.width):\n out += str(j + 1) + ' '\n out += '\\n'\n for i in range(self.height):\n out += '\\n'\n out += str(i + 1) + ' '\n for j in range(self.width):\n out += self.data[i][j] + ' '\n return out", "def boxTextFor(player, text = \"\"):\n pass_text = \"\"\n if isinstance( text, ( str, unicode ) ):\n pass_text = text\n return boxTextAt( pass_text, default_display_vars.borderChar_Left, default_display_vars.borderChar_Right, default_display_vars.boxText_padding, getUserScreenWidth( player ) )", "def style(self, box_style=None, border_visible=False, border_color='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin=0, tabs_box_style=None,\n tabs_border_visible=True, tabs_border_colour='black',\n tabs_border_style='solid', tabs_border_width=1,\n tabs_border_radius=1, tabs_padding=0, tabs_margin=0,\n font_family='', font_size=None, font_style='', font_weight=''):\n format_box(self, box_style, border_visible, border_color, border_style,\n border_width, border_radius, padding, margin)\n format_box(self.lines_markers_box, box_style=tabs_box_style,\n border_visible=tabs_border_style,\n 
border_colour=tabs_border_colour,\n border_style=tabs_border_style,\n border_width=tabs_border_width,\n border_radius=tabs_border_radius, padding=tabs_padding,\n margin=tabs_margin)\n format_box(self.plot_related_options, box_style=tabs_box_style,\n border_visible=tabs_border_style,\n border_colour=tabs_border_colour,\n border_style=tabs_border_style,\n border_width=tabs_border_width,\n border_radius=tabs_border_radius, padding=tabs_padding,\n margin=tabs_margin)\n self.lines_wid.style(\n box_style=tabs_box_style, border_visible=False, padding=0,\n margin=0, font_family=font_family, font_size=font_size,\n font_weight=font_weight, font_style=font_style)\n self.markers_wid.style(\n box_style=tabs_box_style, border_visible=False, padding=0,\n margin=0, font_family=font_family, font_size=font_size,\n font_weight=font_weight, font_style=font_style)\n self.legend_wid.style(\n box_style=tabs_box_style, border_visible=tabs_border_visible,\n border_colour=tabs_border_colour, border_style=tabs_border_style,\n border_width=tabs_border_width, border_radius=tabs_border_radius,\n padding=tabs_padding, margin=tabs_margin, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)\n self.zoom_wid.style(\n box_style=tabs_box_style, border_visible=tabs_border_visible,\n border_colour=tabs_border_colour, border_style=tabs_border_style,\n border_width=tabs_border_width, border_radius=tabs_border_radius,\n padding=tabs_padding, margin=tabs_margin, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)\n self.axes_wid.style(\n box_style=tabs_box_style, border_visible=tabs_border_visible,\n border_colour=tabs_border_colour, border_style=tabs_border_style,\n border_width=tabs_border_width, border_radius=tabs_border_radius,\n padding=tabs_padding, margin=tabs_margin, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)\n self.grid_wid.style(\n box_style=tabs_box_style, border_visible=tabs_border_visible,\n border_colour=tabs_border_colour, border_style=tabs_border_style,\n border_width=tabs_border_width, border_radius=tabs_border_radius,\n padding=tabs_padding, margin=tabs_margin, font_family=font_family,\n font_size=font_size, font_weight=font_weight, font_style=font_style)\n format_font(self, font_family, font_size, font_style, font_weight)\n format_font(self.x_label, font_family, font_size, font_style,\n font_weight)\n format_font(self.y_label, font_family, font_size, font_style,\n font_weight)\n format_font(self.title, font_family, font_size, font_style,\n font_weight)\n format_font(self.legend_entries_text, font_family, font_size, font_style,\n font_weight)\n format_font(self.curves_dropdown, font_family, font_size, font_style,\n font_weight)", "def format_rich_text(rich_text_section):\n formatted_text = \"\"\n\n for elem in rich_text_section.get(\"elements\", []):\n elem_type = elem.get(\"type\")\n if elem_type == \"broadcast\":\n # Convert broadcasts to Discord-friendly pings:\n broadcast_range = elem.get(\"range\")\n if broadcast_range == \"channel\":\n elem_text = \"@everyone\"\n elif broadcast_range == \"here\":\n elem_text = \"@here\"\n elif elem_type == \"emoji\":\n emoji_list = app.client.emoji_list().get(\"emoji\", {})\n if emoji_list.get(elem.get(\"name\")):\n elem_text = f\":{elem.get('name')}:\"\n else:\n print(f\"Skipping over nonstandard emoji {elem.get('name', 'NO NAME')}\")\n continue\n elif elem_type == \"user\":\n # Convert @{user ID} to @{user name}:\n user_info = 
app.client.users_info(\n user=elem.get(\"user_id\")\n ).get(\"user\", {})\n user_name = user_info.get(\"profile\", {}).get(\"display_name\")\n if not user_name:\n # If user has no display_name (is a bot), use real_name:\n user_name = user_info.get(\"real_name\")\n if not user_name:\n # If user has no name, skip mention altogether:\n print(\"Skipping over user mention with no associated name.\")\n continue\n elem_text = f\"@{user_name}\"\n else:\n elem_text = elem.get(\"text\", \"\")\n \n style = elem.get(\"style\", {})\n\n # Prevent plain text from being rendered as markup:\n # (Code blocks by default have no markup)\n if not style.get(\"code\"):\n elem_text = elem_text.replace(\"_\", \"\\_\")\n elem_text = elem_text.replace(\"*\", \"\\*\")\n elem_text = elem_text.replace(\"`\", \"\\`\")\n elem_text = elem_text.replace(\">\", \"\\>\")\n\n # Apply appropriate styles to element's text:\n if style.get(\"bold\") or elem_type == \"user\":\n elem_text = f\"**{elem_text}**\"\n if style.get(\"italic\"):\n elem_text = f\"*{elem_text}*\"\n if style.get(\"strike\"):\n elem_text = f\"~~{elem_text}~~\"\n if style.get(\"code\"):\n elem_text = f\"`{elem_text}`\"\n\n # If element is a link, add the URL in Discord-friendly format:\n # \"[ hyperlink text ]( URL )\"\n if elem_type == \"link\":\n elem_text = f\"[{elem_text}]({elem.get('url')})\"\n\n # add formatted element's text to final markup string:\n formatted_text += elem_text\n\n # return final markup string:\n return formatted_text", "def StringFromMatchbox(index):\n\tboard = BoardFromIndex(index)\n\tmatchbox = GetMatchboxes()[index]\n\n\toutput = []\n\tfor row in range(3):\n\t\tsquares = []\n\t\tfor col in range(3):\n\t\t\tif board[row][col] == ' ':\n\t\t\t\tsquares.append('{:^3}'.format(matchbox[row*3 + col]))\n\t\t\telse:\n\t\t\t\tsquares.append('{:^3}'.format(board[row][col]))\n\t\toutput.append('|'.join(squares))\n\treturn '\\n-----------\\n'.join(output)", "def render(self):\n\n sep = '+'.join('-' * w for w in self._widths)\n sep = f'+{sep}+'\n\n to_draw = [sep]\n\n def get_entry(d):\n elem = '|'.join(f'{e:^{self._widths[i]}}' for i, e in enumerate(d))\n return f'|{elem}|'\n\n to_draw.append(get_entry(self._columns))\n to_draw.append(sep)\n\n for row in self._rows:\n to_draw.append(get_entry(row))\n\n to_draw.append(sep)\n return '\\n'.join(to_draw)", "def statBox(ax, entries, binEdges, x=0.96, y=0.98, fontsize='medium'):\n en = len(entries) # number of entries\n ov = (entries > binEdges[-1]).sum() # overflows\n uf = (entries < binEdges[0]).sum() # underflows\n mn = entries.mean() # mean\n sd = entries.std() # standard deviation\n textstr = 'N=%i \\nOverflow=%i \\nUnderflow=%i \\n$\\mu=%.2f$ \\n$\\sigma=%.2f$' % (\n en, ov, uf, mn, sd)\n props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n ax.text(x, y, textstr,\n transform=ax.transAxes,\n bbox=props,\n fontsize=fontsize,\n horizontalalignment='right',\n verticalalignment='top')", "def print_grid(grid):\r\n print(\"+\",'-'*len(grid[0]*5),'+',sep='')# top line of box\r\n for i in range(len(grid)):\r\n grid_str = ''\r\n for j in range(len(grid[i])):\r\n if grid[i][j] == 0:\r\n grid_str += \"{:<5}\".format(' ')\r\n else:\r\n grid_str += \"{:<5}\".format(grid[i][j])#append a 5-width column\r\n print('|',grid_str,'|',sep='')\r\n print(\"+\",'-'*len(grid[0]*5),'+',sep='')# bottom line of box\r", "def setpopbox(ty,slist,scaledtime,rootpop,poptree):\r\n wadjust = \"\"\r\n for i in range(numpops-1):\r\n wadjust += \"00\"\r\n if(scaledtime != []):\r\n minx_popbox = textwide(wadjust+\"0.00 MYR\", 
tfactor)\r\n else:\r\n minx_popbox = textwide(wadjust+\"0.00 tu\", tfactor)\r\n minx_popbox /= gv[\"globalscale\"]\r\n if gv[\"localxscale\"] > 0:\r\n minx_popbox /= gv[\"localxscale\"]\r\n\r\n popxvals = []\r\n## if scaledpop == [] then no text is written on time split line and there is more width to work with\r\n for i in range(2*numpops - 1):\r\n## left side temporarily at zero, right side temporarily at upper confidence interval\r\n popxvals.append( [0,slist[4][4][i][1]])\r\n (width,c,popxvals, leftpoint,rightpoint) = centerbox(rootpop,0,popxvals[rootpop][1],poptree,popxvals)\r\n popxvals = popadjustx(popxvals,minx_popbox)\r\n popbox = []\r\n\r\n ## maxwide will be used to adjust the width as a scaler so the part furthest to the right is not too far out\r\n maxwide = 0\r\n for i in range(2*numpops-1):\r\n if maxwide < (popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1])):\r\n maxwide = (popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1]))\r\n maxwide = maxwide/(1.0-minx_popbox)\r\n\r\n if gv[\"localxscale\"] > 0:\r\n maxwide *= gv[\"localxscale\"]\r\n\r\n farright = 0\r\n confint = []\r\n for i in range(2*numpops-1):\r\n confint.append([])\r\n confint[i].append(minx_popbox + ((popxvals[i][1] - (slist[4][4][i][1]-slist[4][4][i][2]))/maxwide))\r\n confint[i].append(minx_popbox + ((popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1]))/maxwide))\r\n if confint[i][1] > farright:\r\n farright = confint[i][1]\r\n popbox.append([[],[]])\r\n popbox[i][0].append(minx_popbox + popxvals[i][0]/maxwide)\r\n popbox[i][1].append(minx_popbox + popxvals[i][1]/maxwide)\r\n if poptree[i][1] == -1:\r\n popbox[i][0].append(gv[\"lineINFy\"])\r\n else:\r\n popbox[i][0].append(ty[poptree[i][1]-1][0])\r\n if poptree[i][0] == 0:\r\n popbox[i][1].append(gv[\"line0y\"])\r\n else:\r\n popbox[i][1].append(ty[poptree[i][0]-1][0])\r\n return popbox,maxwide, confint, farright", "def __str__(self):\n card_str = ['-' * CARD_WIDTH]\n card_str.append('{} (Game {})'.format(self.name, self.game.number).center(CARD_WIDTH))\n card_str.append('-' * CARD_WIDTH)\n card_str.append(''.center(CARD_WIDTH))\n card_str.append('Money: {}'.format(self.money).center(CARD_WIDTH))\n card_str.append('Turnover: {}'.format(self.turnover).center(CARD_WIDTH))\n card_str.append(''.center(CARD_WIDTH))\n card_str.append('-' * CARD_WIDTH)\n return '\\n'.join('|{}|'.format(l) for l in card_str)", "def draw_boxes_texts(img,\n boxes,\n texts=None,\n colors=None,\n line_width=1,\n draw_start=False,\n box_format='x1y1x2y2'):\n assert box_format in ('x1y1x2y2', 'x1y1wh', 'xywh', 'xywha',\n 'polygon'), 'not supported box format!'\n img = imread(img)\n if len(boxes) == 0:\n return img\n boxes = copy.deepcopy(boxes)\n # convert bbox type to int\n if not isinstance(boxes, np.ndarray):\n if box_format != 'polygon':\n boxes = np.array(boxes)\n if box_format != 'xywha':\n boxes = boxes.astype(np.int)\n if len(boxes.shape) == 1:\n boxes = [boxes]\n else:\n boxes = [list(map(int, box)) for box in boxes]\n else:\n boxes = boxes.astype(np.int)\n if texts is not None and not isinstance(texts, (list, np.ndarray)):\n texts = [texts]\n if isinstance(img, Image.Image):\n img = cv.cvtColor(np.asarray(img), cv.COLOR_RGB2BGR)\n if not isinstance(img, np.ndarray):\n return\n if colors == 'random':\n colors = np.random.randint(0, 255, size=(len(boxes), 3))\n colors = [tuple(map(int, color)) for color in colors]\n text_color = (0, 255, 255)\n thickness = line_width\n font = cv.FONT_HERSHEY_SIMPLEX\n for idx, box in enumerate(boxes):\n # default color: red, BGR order\n 
box_color = (0, 0, 255) if colors is None else colors[idx]\n if box_format == 'x1y1x2y2':\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'x1y1wh':\n box[0:4] = cvtools.x1y1wh_to_x1y1x2y2(list(box[0:4]))\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'xywh':\n box[0:4] = cvtools.xywh_to_x1y1x2y2(list(box[0:4]))\n cv.rectangle(img, tuple(box[0:2]),\n tuple(box[2:4]), box_color, thickness)\n elif box_format == 'xywha':\n rrect = tuple(box[:2]), tuple(box[2:4]), box[4]\n box = cv.boxPoints(rrect).astype(np.int)\n # box = np.int0(box)\n cv.drawContours(img, [box], 0, box_color, thickness)\n box = box.reshape((-1,))\n elif box_format == 'polygon':\n # for i in np.arange(2, len(box), 2):\n # cv.line(img, tuple(box[i-2:i]),\n # tuple(box[i:i+2]), box_color, thickness)\n # cv.line(img, tuple(box[-2:]),\n # tuple(box[:2]), box_color, thickness)\n # 如果img内存非连续,cv的所有绘制都会失效\n cv.polylines(img, np.int32([np.array(box).reshape(-1, 2)]), 1, box_color, thickness)\n # cv.line(img, tuple(box[:2]), tuple(box[2:4]), box_color, thickness)\n # cv.line(img, tuple(box[2:4]), tuple(box[4:6]), box_color, thickness)\n # cv.line(img, tuple(box[4:6]), tuple(box[6:8]), box_color, thickness)\n # cv.line(img, tuple(box[6:]), tuple(box[:2]), box_color, thickness)\n if draw_start:\n cv.circle(img, tuple(box[:2]),\n radius=5, color=text_color, thickness=-1)\n if texts is not None:\n cv.putText(img, texts[idx],\n (box[0]+2, box[1]-2), font, 0.5, text_color, 1)\n return img", "def print_lower_box_line():\n print_indentation()\n print(STYLES[parameters[\"Style\"]][\"Lower left corner\"], end=\"\")\n for _ in range(text_width_with_spaces):\n print(STYLES[parameters[\"Style\"]][\"Horizontal line\"], end=\"\")\n print(STYLES[parameters[\"Style\"]][\"Lower right corner\"])", "def update_format(self, record):\n prefix = \"\\u001b[\"\n color = f\"{prefix}{self.color_map[record.levelno]}m\"\n bold = f\"{prefix}1m\"\n gray = f\"{prefix}1m{prefix}30m\"\n reset = f\"{prefix}0m\"\n self._style._fmt = (\n f\"%(asctime)s\"\n f\" {gray}│{reset} {color}%(levelname)-8s{reset} {gray}│{reset} \"\n )\n if hasattr(record, \"function\"):\n self._style._fmt += (\n f\"{gray}%(indent)s{reset}\"\n f\"{bold}%(function)s{reset}{gray}:{reset}\"\n \" %(message)s\"\n )\n else:\n self._style._fmt += \"%(indent)s%(message)s\"", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def group_list(self, group_number=1):\n text = []\n group = self.varexercise_numbers[group_number-1]\n group_name = _('Group %s') % group[0]\n text.append('\\n\\\\subsection*{%s}\\n' % group_name)\n for number in range(1, self.number_of_variations+1):\n print(\"---------\", number) # !!!\n variation = '\\n\\n\\\\textbf{%s}\\\\\\\\\\n' %\\\n _('Variation %d') % number\n text.append(variation)\n exercise_number = 1\n for item in group[1:]:\n # print(' '*5, item) # !!!\n if not isinstance(item, str):\n latex_plain = item.latex_plain(number)\n if latex_plain:\n text.append('%d. 
' % exercise_number)\n # print(' '*5, number) # !!!\n # print(' '*5, latex_plain) # !!!\n text.append(latex_plain + '\\n')\n exercise_number += 1\n # if with_solution:\n # text.extend(self.one_group(group_number=group_number))\n # text.append(\"\\\\newpage\\n\")\n return text" ]
[ "0.638623", "0.6335261", "0.62239504", "0.6142633", "0.612802", "0.6116231", "0.61149377", "0.61149377", "0.61149377", "0.61149377", "0.61149377", "0.61149377", "0.6066256", "0.6044949", "0.60073614", "0.60017395", "0.60017395", "0.5998201", "0.59273905", "0.58198524", "0.58169824", "0.57074255", "0.56900686", "0.5670092", "0.5610495", "0.56019866", "0.5591103", "0.5549327", "0.5543219", "0.5489891", "0.5480481", "0.5470064", "0.5469023", "0.5456192", "0.54541475", "0.54392487", "0.5437799", "0.54194695", "0.5408252", "0.5407614", "0.5404663", "0.5392055", "0.53855866", "0.53844845", "0.5380448", "0.5378995", "0.5376725", "0.53710836", "0.5369823", "0.536273", "0.535947", "0.5352463", "0.53351927", "0.53230923", "0.5319584", "0.53195286", "0.53148", "0.5307666", "0.53047174", "0.5303338", "0.52979743", "0.5282422", "0.52759004", "0.5274858", "0.5272641", "0.52713746", "0.52699584", "0.52648544", "0.52539796", "0.52447164", "0.5242105", "0.5227489", "0.52258706", "0.52207446", "0.5215071", "0.52014834", "0.51988846", "0.5185473", "0.5181557", "0.5176848", "0.51731753", "0.5162015", "0.5144905", "0.5134482", "0.51270986", "0.51205635", "0.51171976", "0.5114479", "0.5101576", "0.5099664", "0.5097213", "0.50873995", "0.5080191", "0.5078428", "0.5071138", "0.50708836", "0.50650126", "0.5055228", "0.50455284", "0.5045401", "0.5043688" ]
0.0
-1
Return a set of the keys in the Box
def keys(self, dependent=True, independent=False): out = set() for row in self: if independent: out.update(row[INDEP].keys()) if dependent: out.update(row[DEP].keys()) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keys():", "def keys(self):\r\n return [k for k in self]", "def keys(self):\n return [ x for x in self ]", "def get_keys(self):\r\n return self._keys", "def keys(self):\n\n return self.keys_set", "def keysAll():", "def keys(self) -> List:\n pass", "def keys(self):\n return list(self.__iter__())", "def getkeys(self):\n return list(self.keys)", "def get_keys(self):\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET KEYS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {key[0] for key in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.key_dict.keys()", "def keys(self):\n return list(self.iterkeys())", "def keys(self):\r\n return [key for key, value in self.iteritems()]", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def keys(self):\n return", "async def get_keys(self):\n return self.dict.keys()", "def Keys(self) -> NameObjectCollectionBase.KeysCollection:", "def keys(self):\n return self.keys", "def keys(self):\n list_all_dict = self.list_all()\n return list_all_dict[\"nodes\"] + list_all_dict[\"groups\"]", "def keys(self):\n return [key for key, value in self.items()]", "def Keys(self) -> _n_1_t_4:", "def keys(self):\n return [kvp.key for kvp in self.keyvaluepair_set.all()]", "def get_all_keys(self):\n r = []\n with self.lock:\n for key in self.keys():\n if self.get(key):\n r.append(key)\n\n return r", "def keys(self) -> List[str]:\n raise NotImplementedError", "def keys(self):\n\n return list(self.iterkeys())", "def keys(self):\n return self.__keys", "def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys", "def keys(self):\n return self._keys", "def keys(self):\n return self._keys", "def list_all_keys(self):\n \n return self.keys", "def keySet (self) -> StringSet:\n\n Logging.trace(\">>\")\n result = set(self._keyToValueMap.keys())\n Logging.trace(\"<<: %r\", result)\n return result", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def get_all_keys(self):\r\n all_keys = []\r\n for i in range(len(self.hash_table)):\r\n if self.hash_table[i] is not None:\r\n all_keys.append(self.hash_table[i].key)\r\n return all_keys", "def get_keys(self):\r\n k_list = []\r\n try:\r\n for k in self.buttons:\r\n if self.buttons[k] != 0:\r\n k_list.append(k)\r\n return k_list\r\n except KeyError:\r\n pass\r\n return k_list", "def keys(self):\n raise NotImplementedError", "def AllKeys(self) -> _n_0_t_1[str]:", "def keys(self):\n # TODO: Collect all keys in each of the buckets\n all_keys = [] # Will store all the key\n for bucket in self.buckets:\n for key in bucket:\n if key is not None:\n all_keys.append(key[0])\n return all_keys", "def keys(self):\n ks = dict.keys(self)\n ks.sort()\n return ks", "async def keys(self) -> Iterable[str]:", "def keys(self) -> t.List[str]: # type: ignore[override]\n return list(self.__keys)", "def keys(self) -> List[str]:\n return self.__stash.keys()", "def keys(self):\n with self.__plock:\n return self._keys[:]", "def keys(self) -> t.Tuple[str, ...]:\n return self._keys", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "def keys(self):\n return self.__dict__.keys()", "def keys(self):\n return self.__dict__.keys()", "def keys(self):\n return self.__dict__.keys()", "def keys(self):\n return 
self.__dict__.keys()", "def keys(self):\n # Collect all keys in each bucket\n all_keys = []\n for bucket in self.buckets:\n for key, value in bucket.items():\n all_keys.append(key)\n return all_keys", "def iterkeys(self):", "def iterkeys(self):", "def get_nodes(self):\n return_set = set()\n for key in self._main_dictionary:\n return_set.add(key)\n return return_set", "def keys(self):\n if self._cubas is None:\n data = self.data\n self._cubas = set(\n CUBA[data.GetArray(index).GetName()]\n for index in range(data.GetNumberOfArrays()))\n return set(self._cubas)", "def keys(self):\n return self._sequence[:]", "def keys(self) -> Sequence[str]:\n raise NotImplementedError", "def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys", "def keys(self):\n\t\treturn iter(Point(x, y) for y, x in itertools.product(range(self.dims.height), range(self.dims.width)))", "def get_all_keys(self):\n return self.psettings.allKeys()", "def keys(self):\n klst = list(self._maps.keys())\n klst.sort()\n return klst", "def keys(self) -> Iterable[T]:\n return self._store.keys()", "def iterkeys(self):\n return iter(kvp.key for kvp in self.keyvaluepair_set.all())", "def keys(self):\n\t\treturn tuple(self.dist.keys())", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def keys(self):\n if self._keys is not None:\n return self._keys\n self._set_keys()\n return self._keys", "def get_keys(self, ckey=None):\n if ckey:\n keys = self._get_keys(ckey)\n else:\n keys = self.keys()\n for key in self.keys():\n keys += [k for k in self._get_keys(key)]\n return list(set(keys))", "def __call__(self):\n return self._main._keys()", "def iterkeys(self):\n return self.__iter__()", "def keys(self):\n return iter(k for k, _ in self._pairs())", "def keys(self):\n keys = set()\n with pd.HDFStore(self.rootpath, mode=\"r\") as hdf:\n hdf5_keys = hdf.keys()\n\n for key in hdf5_keys:\n kp = key.split(\"/\")\n if len(kp) == 5:\n print(kp, len(kp))\n keys.add(kp[4])\n return list(keys)", "def keys(self) -> Set[str]:\n return {self._build_key(key) for key in self.motions}", "def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)", "def all_keys(self):\n return self.derivable_keys() + self.loadable_keys()", "def _keys(obj):\n \n k = []\n \n if len(obj) > 0:\n # There is at least one thing\n for x in obj:\n # Make sure keys() is defined\n if hasattr(x, 'keys'):\n \n k.extend(x.keys())\n \n k = list(set(k))\n k.sort()\n \n return k", "def keys(self):\n return self._d.keys()", "def keys(self):\n if self.widget:\n w_keys = self.widget.keys()\n else:\n w_keys = []\n return sorted(w_keys + list(ttfont_dict_keys) + [case_s])", "def __iter__(self):\n return self.keys()", "def __iter__(self):\n\t\treturn self.keys()", "def keys(self):\n return _osgAnimation.mapVertexInfluence_keys(self)", "def get_keys(self):\n return list(self.public_keys.keys())", "def get_key_set():\n keys = [0] * n_families\n for i in range(n_families):\n keys[i] = get_key(i)\n\n return keys", "def keys(self, **kwargs) -> Iterable:\n return self.store.keys(**kwargs)", "def keys(self) -> tuple(Any, ...): # type: ignore\n return tuple(self.contents.keys())", "def get_keys_for_tag(self,tag):\r\n\r\n #using database\r\n if self.using_database:\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if 
fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n #using shelf\r\n if self.using_shelf:\r\n if self.tag_dict_contains(tag):\r\n return self.tag_dict[tag]\r\n return set()", "def keys(self) -> KeysView:\n return self._dict.keys()", "def keys(self):\n return self._children.keys()", "def keys(self) -> tuple[Hashable, ...]:\n return tuple([self._hashify(item = c) for c in self.contents])", "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "def keys(self):\n return [entry.key for entry in self.table if entry.value is not None]", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def keys(self):\r\n L = []\r\n for wr in self.data.keys():\r\n o = wr()\r\n if o is not None:\r\n L.append(o)\r\n return L", "def keys(self):\n raise NotImplementedError('keys() should have been replaced by a metaclass')", "def keyrefs(self):\r\n return self.data.keys()", "def keyrefs(self):\r\n return self.data.keys()", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def keys(self):\n return self._tagged.keys()", "def keys(self):\n return list(self.token2id.values())", "def keySet(self):\n return KeySet()", "def iterkeys(self):\r\n return self.data.iterkeys()", "def keys(self):\n return self.properties.keys()", "def keys(self):\n keys = [key for key in self.__dict__.keys() if self[key] is not None]\n keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']\n return keys", "def keys(self):\n keys = [key for key in self.__dict__.keys() if self[key] is not None]\n keys = [key for key in keys if key[:2] != '__' and key[-2:] != '__']\n return keys" ]
[ "0.7376837", "0.7284792", "0.7275277", "0.7271223", "0.7257253", "0.7218919", "0.72185975", "0.71058506", "0.70899194", "0.7057549", "0.70277166", "0.7014823", "0.70009017", "0.70009017", "0.69692326", "0.69683117", "0.6958609", "0.69498646", "0.6940273", "0.69003475", "0.6895464", "0.688431", "0.6853357", "0.6838338", "0.68144125", "0.68109953", "0.68042773", "0.67959106", "0.67959106", "0.6794178", "0.67887443", "0.67776513", "0.67615", "0.6759294", "0.67425805", "0.6740035", "0.67337036", "0.67129624", "0.6696935", "0.6679513", "0.6679423", "0.667457", "0.66570866", "0.66521317", "0.6630242", "0.6630242", "0.6630242", "0.6630242", "0.66270345", "0.6624582", "0.6624582", "0.66225517", "0.6618974", "0.6615212", "0.6614299", "0.66113675", "0.66028905", "0.6593598", "0.6565208", "0.65600616", "0.6543453", "0.654211", "0.65243125", "0.6500488", "0.64954317", "0.6494715", "0.6486629", "0.6482409", "0.648161", "0.6479096", "0.6478562", "0.6478275", "0.6454757", "0.64465445", "0.6442553", "0.6431355", "0.64248055", "0.64181626", "0.6413212", "0.640523", "0.6401314", "0.6399098", "0.6394935", "0.6390474", "0.6384401", "0.63830155", "0.6380138", "0.63776505", "0.63771856", "0.63771856", "0.6361558", "0.6357659", "0.6357659", "0.635569", "0.63489115", "0.63437974", "0.63420856", "0.63354146", "0.6331471", "0.6329698", "0.6329698" ]
0.0
-1
The set methods must raise a ComponentErrorsEx in case of wrong mode
def test_wrong_mode(self): self.assertRaises(ComponentErrorsEx, self.dp.setRewindingMode, 'FOO')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modifyNotValuableComponents(self):\n # Nothing to do\n pass", "def set_comms_mode(self):", "def magic_xmode(self,parameter_s = ''):\n\n new_mode = parameter_s.strip().capitalize()\n try:\n self.InteractiveTB.set_mode(mode = new_mode)\n print 'Exception reporting mode:',self.InteractiveTB.mode\n except:\n warn('Error changing exception modes.\\n' + str(sys.exc_info()[1]))", "def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )", "async def test_set_swing_mode_bad_attr(opp):\n state = opp.states.get(ENTITY_CLIMATE)\n assert \"Off\" == state.attributes.get(ATTR_SWING_MODE)\n\n with pytest.raises(vol.Invalid):\n await common.async_set_swing_mode(opp, None, ENTITY_CLIMATE)\n await opp.async_block_till_done()\n\n state = opp.states.get(ENTITY_CLIMATE)\n assert \"Off\" == state.attributes.get(ATTR_SWING_MODE)", "def enable(self) -> None:", "def error(self):\n pass", "def test_mode_invalid(mode):\n # Test errors on construction\n with pytest.raises(TypeConversionError):\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5,\n mode=mode)\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5)\n gay_berne.params[('A', 'A')] = {'epsilon': 1, 'lpar': 0.5, 'lperp': 1.0}\n # Test errors on setting\n with pytest.raises(TypeConversionError):\n gay_berne.mode = mode", "def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)", "def setMode(self,mode):\n self.mode=mode\n if self.mode==0:\n self.setDrawing()\n elif self.mode==1:\n self.setConstruction()\n elif self.mode==2:\n self.setDisplay()\n self.context.text.append(\"mode: \"+self.messages[self.mode])", "def enable(self):", "def modifyComponentsNotPreferableOnServer(self):\n # Nothing to do\n pass", "def test_handling_wrong_context(member, mode, arg, msg):\n with pytest.raises(TypeError) as excinfo:\n member.set_validate_mode(getattr(Validate, mode), arg)\n assert msg in excinfo.exconly()", "async def test_async_set_preset_mode_invalid():\n\n client = Mock()\n device_stub = Mock()\n\n logger = Mock()\n logger.debug = Mock()\n logger.warning = Mock()\n\n wrapper = WinixDeviceWrapper(client, device_stub, logger)\n\n await wrapper.async_set_preset_mode(\"INVALID_PRESET\")\n logger.warning.call_count == 1", "def validate(self, mode): # pragma: no cover\n pass", "def error(self):\n ...", "def test_component_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially 
contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n # create component_owner option\n self.env.config.set('ticket-field-config','component_owner','test')\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_component_list(), self.new['component'])", "def testProtocolSetBadType(self):\n def setProtocol():\n self.mr.protocol = 12345\n\n self.assertRaises(\n TypeError,\n setProtocol\n )", "def test_set_mode_wrong_args(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_mode(None)", "def set_state( self ):", "async def test_set_operation_bad_attr_and_state(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"\n with pytest.raises(vol.Invalid):\n await common.async_set_operation_mode(opp, None, ENTITY_WATER_HEATER)\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"", "def test_tooManyModeParameters(self):\n self._sendModeChange(\"+s\", \"wrong\")\n self._checkModeChange([])\n errors = self.flushLoggedErrors(irc.IRCBadModes)\n self.assertEqual(len(errors), 1)\n self.assertSubstring(\"Too many parameters\", errors[0].getErrorMessage())", "def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1", "def setMode(self, mode):\n self.mode = mode\n if self.mode == 0:\n self.setDrawingMode()\n elif self.mode == 1:\n self.setConstructionMode()\n elif self.mode == 2:\n self.setDisplayMode()\n self.context.text.append(\"mode: \" + self.messages[self.mode])", "def mode (self, mode) :\r\n self.mode_ = mode", "def set_error(self, index: int) -> None:\n ...", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def xmode(self, parameter_s=''):\n\n def xmode_switch_err(name):\n warn('Error changing %s exception modes.\\n%s' %\n (name,sys.exc_info()[1]))\n\n shell = self.shell\n if parameter_s.strip() == \"--show\":\n shell.InteractiveTB.skip_hidden = False\n return\n if parameter_s.strip() == \"--hide\":\n shell.InteractiveTB.skip_hidden = True\n return\n\n new_mode = parameter_s.strip().capitalize()\n try:\n shell.InteractiveTB.set_mode(mode=new_mode)\n print('Exception reporting mode:',shell.InteractiveTB.mode)\n except:\n xmode_switch_err('user')", "def set_mode(self,mode,state=True):\n\t\tprint \"SET_MODE START\"\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tif val.index(mode) is not None:\n\t\t\t\tif state:\n\t\t\t\t\tval.activate( val.index(mode) )\n\t\t\t\telse:\n\t\t\t\t\tval.deactivate( val.index(mode) )\n\t\t\"\"\"\n\t\tprint \"SET_MODE DONE -- ALSO DOING EXPERIMENTAL -- \"\n\t\t# DEBUG / EXPERIMENTAL\n\t\tif self.int_encoder is not None:\n\t\t\tif mode == 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/VOLUME ({0}:{1}).. 
disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/NOT VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode == 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. ECA/VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. ECA/NOT VOLUME ({0}:{1}).. disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\tprint \"DEBUG2.. done\"\n\t\t\"\"\"", "def setup(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def set_mode(self, mode: str) -> None:\n # Not all programs are fully supported by the current\n # OpenInterface API version. The known restricitons are:\n # - The 'Calibration' and 'TightnessTest' programms cannot\n # be started through the API.\n # - The 'Dry' program does not expose all it's parameters\n # (see github.com/buchi-labortechnik-ag/openinterface_rotavapor/issues/1)\n return self.send(self.cmd.SET_MODE, mode)", "def setError(self,err):\n self.error = err", "def _combo_mode_changed(self, *a):\r\n \r\n # Get the current mode.\r\n mode = self.api.get_mode()\r\n \r\n # Update the machine if the combo mode doesn't match\r\n if not mode == self.combo_mode.get_text():\r\n self.api.set_mode(self.combo_mode.get_text())\r\n \r\n # Get the mode again, to make sure it still matches.\r\n if self.api.get_mode() == 'List': \r\n self.combo_mode.set_index(1, block_events=True).enable()\r\n self.number_list_index.set_value(self.api.get_list_index(), block_events=True).enable()\r\n self._number_list_index_changed()\r\n self.number_frequency.disable() #Disable the frequency button\r\n self.number_dbm.disable() #Disable the dbm button\r\n else: \r\n #It is in fixed mode and we update the value of the button\r\n self.combo_mode.set_index(0, block_events=True).enable()\r\n self.number_frequency.set_value(self.api.get_frequency()).enable()\r\n self.number_dbm .set_value(self.api.get_power()).enable()\r\n self.number_list_index.disable() #Change the list index. 
\r", "def test_call(self):\r\n p = RepSetPicker({})\r\n self.assertRaises(NotImplementedError, p, '/path/to/seqs',\r\n '/path/to/otus')", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def test_set_attributes_error(self):\n r = Resources()\n attr_lst = [\"num_wires\", \"num_gates\", \"depth\", \"shots\", \"gate_types\"]\n\n for attr_name in attr_lst:\n with pytest.raises(FrozenInstanceError, match=\"cannot assign to field\"):\n setattr(r, attr_name, 1)", "def error(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def test_set_value_invalid(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = 'invalid'\r\n initial_value = self.config.values[name]\r\n\r\n self.assertRaises(InvalidOptionValueError, self.config.set_value, name, option, value)\r\n self.assertEqual(self.config.values[name], initial_value)", "def test_Alpha_setter_invalid(self):\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', -5)\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', 2)", "def test_tooFewModeParameters(self):\n self._sendModeChange(\"+o\")\n self._checkModeChange([])\n errors = self.flushLoggedErrors(irc.IRCBadModes)\n self.assertEqual(len(errors), 1)\n self.assertSubstring(\"Not enough parameters\", errors[0].getErrorMessage())", "def error(self, msg):\n vim.command('call pymode#error(\"%s\")' % str(msg))", "def enable(self):\n pass", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def test_broken_error_descriptor(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def _SetMode(self, newmode, msg):\n self.buffer = u'' # Clear buffer from last mode\n self.mode = newmode\n # Update status bar\n evt = ed_event.StatusEvent(ed_event.edEVT_STATUS, self.stc.GetId(),\n msg, ed_glob.SB_BUFF)\n wx.PostEvent(self.stc.GetTopLevelParent(), evt)", "def set(self):\n\n raise Exception(\"Can't set frmt.\")", "def test_component_without_owner_is_trac_error(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n\n # we purposely forget to add component_owner to config\n # and run the plugin expecting a TracError\n admin_command = TicketFieldConfigCommand(self.env)\n self.assertRaises(TracError,admin_command.set_fields_from_config)", "def test_update_composition(self):\n pass", "def test_preprocessing_status_setter_valueerror(self):\n with self.assertRaises(ValueError):\n self.tester.preprocessing_status = 'not a valid state'", "def _SetRequiredCoreModules(self, textEdit, frame, tab, controls):\n self.text_editor = self.system_modules[textEdit]\n self.mf = self.system_modules[frame]\n self.mt = self.system_modules[tab]\n self.mc = self.system_modules[controls]", "def React(self):\n ##Disable DesktopMode if Xplorer & Conductor == False\n #self.state.React(self.state.GetSurface(\"Xplorer\") == False and\n # self.state.GetSurface(\"Conductor\") == False,\n # \"DesktopMode\", False)\n if 
self.state.GetSurface(\"DesktopMode\"):\n self.rbXplorer.SetSelection(0)\n \n \n self.UpdateDisplay()\n return", "def setMode(self, newmode=None):\n if newmode==None and self.mode: return\n \n # find it in my dictionary\n for k,v in self.items():\n if k.lower() == \"mode\":\n if newmode:\n self.mode = newmode\n self[k] = str(self.mode)\n else:\n self.mode = int(v)\n \n # it wasn't in the dictionary\n if newmode and not self.mode:\n self.mode = newmode\n self[\"MODE\"] = str(self.mode)\n \n if not self.mode:\n raise NetworkException(\"Supplink mode not set: \" + str(self))", "def __call__(self):\r\n raise self", "def __call__(self):\r\n raise self", "def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)", "def check_enable_mode(self, *args, **kwargs):\n pass", "def set_mode(self, mode):\n SetMode_srv = SetModeRequest(0, mode)\n response = self.set_mode_client(SetMode_srv)\n if response.mode_sent:\n rospy.loginfo(CGREEN2 + \"SetMode Was successful\" + CEND)\n return 0\n else:\n rospy.logerr(CRED2 + \"SetMode has failed\" + CEND)\n return -1", "def invalid(self):\n pass", "def support(self):", "def enable(self):\n raise NotImplementedError", "def validate(self):\n Component.validate(self)\n kinds = (\"lib\", \"exe\")\n if self.kind not in kinds:\n raise Invalid(\"kind must be one of %s for component %s\" % (kinds,self.name))\n\n if self.kind == \"exe\" :\n if not self.exe_path:\n raise Invalid(\"exe_path must be defined for component %s\" % self.name)", "def set_invalidated(self):\n # GTK Settings for evogtk\n self.set_property('image',self.__errorimg)", "def precheck(self):\n if self.__chardev_obj is None:\n self.logger.exception(\"[IPMI] -chardev should set.\")\n raise Exception(\"-chardev should set.\")", "def test_call_incompatible_data(self):\r\n self.cs_overview.DistanceMatrices = [self.single_ele_dm,\r\n self.single_ele_dm]\r\n self.assertRaises(ValueError, self.cs_overview)", "def __setitem__(self, key: Any, value: Any) -> NoReturn:\n raise RuntimeError(\"magicgui.Container does not support item setting.\")", "def Go_Up_Set_Settings_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def error(self, error):\n pass", "def test_mode_from_knx_wrong_value(self):\n with pytest.raises(ConversionError):\n DPTHVACMode.from_knx((1, 2))", "def P_SetModeSuccess(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
testWrongMode(self):\n self.mgr.status = mavutil.mavlink.GOPRO_HEARTBEAT_STATUS_DISCONNECTED\n self.mgr.handleRecordCommand( CAPTURE_MODE_VIDEO, RECORD_COMMAND_TOGGLE )\n self.assertFalse(self.mgr.sendGoProCommand.called)", "def error(self, msg, *args, **kwargs):\n pass", "def testPowerBadType(self):\n def setPower():\n self.cc.power = 'ban'\n\n self.assertRaises(\n TypeError,\n setPower\n )", "def test_set_molecule_error(self):\n mol = Molecule.from_smiles(\"CCO\")\n atom = Atom(6, 0, False)\n atom.molecule = mol\n with pytest.raises(AssertionError, match=\"already has an associated molecule\"):\n atom.molecule = mol", "def error(self, message=None, show_help=True):", "def run_component(self):\n raise NotImplementedError", "def test_handle_flag_error(self):\n pass", "def _set_operation_mode(self, operation_mode: HVACMode):\n if operation_mode == HVACMode.HEAT:\n success = self._client.set_mode(self._client.MODE_HEAT)\n elif operation_mode == HVACMode.COOL:\n success = self._client.set_mode(self._client.MODE_COOL)\n elif operation_mode == HVACMode.AUTO:\n success = self._client.set_mode(self._client.MODE_AUTO)\n else:\n success = self._client.set_mode(self._client.MODE_OFF)\n\n if not success:\n _LOGGER.error(\"Failed to change the operation mode\")\n return success", "def _setmode(self, mode=None):\n if mode is None:\n return self._mode\n if mode not in [\"standard\", \"logo\", \"world\"]:\n return\n self._mode = mode\n if mode in [\"standard\", \"world\"]:\n self._angleOffset = 0\n self._angleOrient = 1\n else: # mode == \"logo\":\n self._angleOffset = self._fullcircle/4.\n self._angleOrient = -1", "def __init__(self):\n\n super(aero_csm_component,self).__init__()", "def test_set_options_wrong_args(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_options(None)", "def setThrowLicenseExceptions(self,value):\n self.PDFreactorConfiguration.in1[\"throwLicenseExceptions\"] = value", "def error(self, *args, **kwargs):", "def test_regulation_mode():\n with expected_protocol(\n DCXS,\n [\n (\"D0\", None),\n (\"c\", 0),\n ],\n ) as inst:\n inst.regulation_mode = \"power\"\n assert \"power\" == inst.regulation_mode", "def failed(self):\n\t\tpass", "def _check_mode(self):\n if self.mode is None:\n raise RuntimeError(\"Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)\")", "def __init__(self):\n raise", "def test_DistanceMatrices_setter_too_few(self):\r\n self.assertRaises(ValueError, setattr, self.cs, 'DistanceMatrices', [])\r\n # Also test that constructor raises this error.\r\n self.assertRaises(ValueError, CorrelationStats, [])", "def unexpectedException(self):" ]
[ "0.58727384", "0.5831983", "0.57986915", "0.5704553", "0.56496924", "0.5616517", "0.5566472", "0.55448025", "0.54887784", "0.54351026", "0.54308224", "0.54267853", "0.54206556", "0.5400841", "0.5366232", "0.5362594", "0.5343893", "0.53403306", "0.53249055", "0.5301672", "0.5286788", "0.5281175", "0.5260891", "0.5248365", "0.5246494", "0.5238808", "0.5200003", "0.5199685", "0.5176162", "0.5167902", "0.5158558", "0.51078314", "0.5103893", "0.5103553", "0.510173", "0.50964195", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.50921136", "0.5087899", "0.5084921", "0.5081172", "0.50790304", "0.5070929", "0.505626", "0.50556827", "0.50532573", "0.5043936", "0.5033145", "0.50270617", "0.50233287", "0.50229305", "0.5003271", "0.49996343", "0.49991763", "0.4999016", "0.49988508", "0.49988508", "0.49908262", "0.4979302", "0.49786702", "0.49771416", "0.49766955", "0.4970776", "0.4966833", "0.4964023", "0.49630788", "0.49445593", "0.49435502", "0.49245584", "0.49190843", "0.49127957", "0.48917505", "0.4891106", "0.48896506", "0.48875928", "0.4886958", "0.48852155", "0.4883651", "0.4881376", "0.48800796", "0.4875375", "0.48710623", "0.48708197", "0.48636281", "0.4841104", "0.4840877", "0.48388386", "0.483842", "0.4835474", "0.48319376", "0.4829962" ]
0.6587823
0
The argument of set must be returned by get in case of right mode
def test_right_mode(self):
    self.dp.setRewindingMode('AUTO')
    self.assertEqual(self.dp.getRewindingMode(), 'AUTO')
    self.dp.setRewindingMode('MANUAL')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set():", "def set():\n pass", "def test_set_with_get(self):\n storage = Storage()\n storage.set('1', 1)\n self.assertEqual(1, storage.set('1', 2, get=True), \"Should return previous value\")\n self.assertEqual(2, storage.get('1'), 'Should get new value')\n self.assertEqual(None, storage.set('2', 1, get=True), \"Should return None as there was no key '2'\")", "def set(x):\n pass", "def Set(self) -> None:", "def _get_set(self, key, operation, create=False, decode=False):\n return self._get_by_type(key, operation, create, b'set', set(), decode=decode)", "def getSets():", "def get_from_set(set_):\n for e in set_: return e", "def _get_other_set(self, other):\n return other._set if isinstance(other, ReadingSet) else other", "def f_set(self, data):\n raise NotImplementedError(\"Should have implemented this.\")", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def getSet(unique_name):", "def getSet(unique_name):", "def handle_set(self, agent) -> Tuple[Optional[str], Any]:\n ref_obj_d = {\"filters\": self.action_dict[\"filters\"]}\n ref_objs = self.subinterpret[\"reference_objects\"](\n self, self.speaker_name, ref_obj_d, extra_tags=[\"_physical_object\"]\n )\n if len(ref_objs) == 0:\n raise ErrorWithResponse(\"I don't know what you're referring to\")\n\n triples_d = self.action_dict[\"upsert\"][\"memory_data\"].get(\"triples\")\n if len(triples_d) == 1 and triples_d[0][\"pred_text\"] == \"has_name\":\n # the set has a name; check to see if one with that name exists,\n # if so add to it, else create one with that name\n name = triples_d[0][\"obj_text\"]\n set_memids, _ = self.memory.basic_search(\n \"SELECT MEMORY FROM Set WHERE (has_name={} OR name={})\".format(name, name)\n )\n if not set_memids:\n # make a new set, and name it\n set_memid = SetNode.create(self.memory)\n self.memory.add_triple(subj=set_memid, pred_text=\"has_name\", obj_text=name)\n else:\n # FIXME, which one\n set_memid = set_memids[0]\n else:\n # an anonymous set, assuming its new, and defined to hold the triple(s)\n set_memid = SetNode.create(self.memory)\n for t in triples_d:\n self.memory.add_triple(\n subj=set_memid, pred_text=t[\"pred_text\"], obj_text=t[\"obj_text\"]\n )\n for r in ref_objs:\n self.memory.add_triple(subj=r.memid, pred_text=\"member_of\", obj=set_memid)\n\n # FIXME point to the objects put in the set, otherwise explain this better\n self.memory.dialogue_stack_append_new(Say, \"OK made those objects into a set \")\n return None, None", "def test_get(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1,2,'three'],\n '4': {1:'one', 2:'two'}}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key])\n\n values = [storage.get(key) for key in keys_to_set.keys()]\n true_values = [keys_to_set[key] for key in keys_to_set.keys()]\n self.assertEqual(true_values, values)\n self.assertRaises(StorageKeyError,storage.get, '0')", "def test_set_get(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"set %s/one 'bye'\" % (self.tests_path))\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n self.assertEqual(\"bye\\n\", self.output.getvalue())", "def test_set_moe_rewrite(self):\n storage = Storage()\n storage.set('1', 1, 5)\n storage.set('1', 2)\n self.assertEqual(False, '1' in storage._moe_dict, \"Moe for key '1' should be reset.\")", "def test_verify_set(self):\n self._verify([self.applied_commands['set']])", "def check_setget(self, key, 
value):\n\n\t\tyield self.conn.set(key, value)\n\n\t\tnew_value = yield self.conn.get(key)\n\t\tself.assertEqual(new_value, value)\n\n\t\tself.assert_((yield self.conn.delete(key)))\n\n\t\tnew_value = yield self.conn.get(key)\n\t\tself.assertEqual(new_value, None)", "def getset(self, name, value):\r\n return self.format_bulk('GETSET', name, value)", "def pre_set(self, value):\r\n return value", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def _should_set(self, key, mode):\n\n if mode is None or mode not in [\"nx\", \"xx\"]:\n return True\n\n if mode == \"nx\":\n if key in self.redis:\n # nx means set only if key is absent\n # false if the key already exists\n return False\n elif key not in self.redis:\n # at this point mode can only be xx\n # xx means set only if the key already exists\n # false if is absent\n return False\n # for all other cases, return true\n return True", "def modeset(self,modesetid):\n\t\tif modesetid in self.ms_all:\n\t\t\treturn copy.deepcopy(self.ms_all[modesetid].simple())\n\t\telse:\n\t\t\treturn", "def _apply_to_sets(self, func, operation, keys, *args):\n keys = self._list_or_args(keys, args)\n if not keys:\n raise TypeError(\"{} takes at least two arguments\".format(operation.lower()))\n left = self._get_set(keys[0], operation) or set()\n for key in keys[1:]:\n right = self._get_set(key, operation) or set()\n left = func(left, right)\n return left", "def test_exclusive(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertEquals({1, 2, 3}, s.data[1])\n self.assertEquals({4, 5, 6}, s.data[4])", "def set(self):\n return self.cdb.code_to_card_set[self.set_code]", "def command_rset(self, arg):\n if arg:\n raise errors.BadArguments('RSET')\n self.reset_arguments()\n self.write_ok()", "def get_set(self, which_set):\n return (getattr(self, 'x_' + which_set),\n getattr(self, 'y_' + which_set))", "def test_io_success(self):\r\n m1 = TestSetModel.create(int_set={1, 2}, text_set={'kai', 'andreas'})\r\n m2 = TestSetModel.get(partition=m1.partition)\r\n\r\n assert isinstance(m2.int_set, set)\r\n assert isinstance(m2.text_set, set)\r\n\r\n assert 1 in m2.int_set\r\n assert 2 in m2.int_set\r\n\r\n assert 'kai' in m2.text_set\r\n assert 'andreas' in m2.text_set", "def testPartialAndIncorrectSetter(self):\n _1 = [ (self.kl[0], 1), \n (self.kl[1], 1), \n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 3, \"got {}\".format(_2))\n _expect = set([(self.kl[0], 2),\n (self.kl[1], 2),\n (getattr(tp, \"RandConso\"), 6)])\n self.assertEqual(_2, _expect, \"something odd\")", "def set(self, **kwargs):\n raise NotImplementedError", "def test_set_add(self):\n\n self.feature_test.set_percentage(15)\n key = self.feature_test._get_redis_key()\n set_key = Feature._get_redis_set_key()\n self.assertTrue(cloak.redis.sismember(set_key, key))", "def testExceedingSetter(self):\n _1 = [ (self.kl[0], 3),\n (self.kl[1], 3),\n (Customer, 1), ] * 2\n self.o.population = _1\n _2 = self.o.population\n self.assertEqual(len(_2), 2, \"got {}\".format(_2))\n _expect = set([(self.kl[1], 4), (self.kl[0], 6), ])\n self.assertEqual(_2, _expect, \"something odd\")", "def test_get_software_set(self):\n pass", "def test_mapping_switch():\n\tassert nset != oset", "def is_set(obj):\n return type(obj) is set", "def set(self, arg: SeField[Any]) -> str:\n if is_bare_set(arg.type):\n return f\"list({arg.varname}) if convert_sets else 
{arg.varname}\"\n else:\n earg = arg[0]\n earg.name = \"v\"\n return (\n f\"[{self.render(earg)} for v in {arg.varname}] \"\n f\"if convert_sets else set({self.render(earg)} for v in {arg.varname})\"\n )", "def test_verify_set_multi(self):\n self._verify([self.applied_commands['setm']])", "def set(self, name, value, **kwargs):\r\n if kwargs:\r\n if 'getset' in kwargs:\r\n warnings.warn(DeprecationWarning(\r\n \"getset option to 'set' is deprecated, \"\r\n \"use Redis.getset() instead\"))\r\n if kwargs['getset']:\r\n return self.getset(name, value)\r\n if 'preserve' in kwargs:\r\n warnings.warn(DeprecationWarning(\r\n \"preserve option to 'set' is deprecated, \"\r\n \"use Redis.setnx() instead\"))\r\n if kwargs['preserve']:\r\n return self.setnx(name, value)\r\n return self.format_bulk('SET', name, value)", "def set(self, U):\n pass", "def set(self, U):\n pass", "def load_set(self, set_name):\n if set_name == 'test':\n return self.xtest", "def set(self) -> set:\n return set(self)", "def __init__(self):\n self.set = set()", "def set(**args):\n return Context(args)", "def test_set(self):\n a = set()\n a.add('b')\n a.add('c')\n a.add('a')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'b', 'c'])\n a.remove('b')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'c'])\n\n a.discard('d')\n\n b = set(['r', 's'])\n d = a.union(b)\n b = list(d)\n b.sort()\n self.assertEqual(b, ['a', 'c', 'r', 's'])", "def __getitem__(self, index):\n assert(isinstance(index,int)), \"Index should be an integer value\"\n assert(0 <= index < len(self.set)), \" Index out of bounds\"\n return self.set[index]", "def test_single(self):\n s = djset()\n s.add([1, 2, 3])\n self.assertEquals({1, 2, 3}, s.data[1])", "def _call_set(vecObj, val):\n res = vecObj.set(val)\n return res", "def set(self, key: t.Hashable, value: t.Any) -> None:", "def parseSet(cmds):\n if len(cmds) != 0:\n first = str.strip(cmds[0])\n if first[0] == 'w':\n pass\n elif first[0] == 'r':\n pass\n else:\n parseExpr(first)\n parseSet(cmds[1:])", "def isgetsetdescriptor(object):\r\n return False", "def define_set():\n set_1 = set([1, 2, 3])\n print type(set_1)\n print set_1\n\n set_2 = {2, 3, 2}\n print type(set_2)\n # <type 'set'>\n print set_2\n # set([2, 3])\n\n a = set((1, 2, 3, 4))\n b = set([3, 4, 5, 6])\n print a | b # Union\n # {1, 2, 3, 4, 5, 6}\n print a & b # Intersection\n # {3, 4}\n print a < b # Subset\n # False\n print a - b # Difference\n # {1, 2}\n print a ^ b # Symmetric Difference\n # {1, 2, 5, 6}", "def getSets(unique_name=None):", "def get_inputs(self):\n return set()", "def set(self, key, value):", "def set(self, key, value):", "def get_one(_set):\r\n assert _set # _set is not empty\r\n return next(iter(_set))", "def get_settemp(self):\n return self.settemp", "def __call__(self, column, setvalue=None, delete=False):\n if delete:\n self.delcolumn(column, False)\n elif setvalue is None:\n return self.getcolumn(column, False)\n else:\n self.setcolumn(column, setvalue, False)", "def test_get_software_set_expanded(self):\n pass", "def get(self, key):", "def get(self, key):", "def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...", "def PermissionSet(self) -> _n_6_t_0:", "def do_set(self, arg):\n try:\n statement, paramName, val = arg.parsed.raw.split(None, 2)\n val = val.strip()\n paramName = paramName.strip().lower()\n if paramName not in self.settable:\n hits = [p for p in self.settable if p.startswith(paramName)]\n if len(hits) == 1:\n paramName = hits[0]\n else:\n return self.do_show(paramName)\n currentVal = 
getattr(self, paramName)\n if (val[0] == val[-1]) and val[0] in (\"'\", '\"'):\n val = val[1:-1]\n else:\n val = cast(currentVal, val)\n setattr(self, paramName, val)\n self.stdout.write('%s - was: %s\\nnow: %s\\n' % (paramName, currentVal, val))\n if currentVal != val:\n try:\n onchange_hook = getattr(self, '_onchange_%s' % paramName)\n onchange_hook(old=currentVal, new=val)\n except AttributeError:\n pass\n except (ValueError, AttributeError, NotSettableError):\n self.do_show(arg)", "def __getitem__(self, name: str) -> Set[BaseAssignment]:\n ...", "def get_set():\n\twhile True:\n\t\ttry:\n\t\t\tS = raw_input().split()\n\t\t\tS = set(map(int, S))\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint \"Input must be integers\"\n\t\t\tcontinue\n\treturn S", "def do_set(self, arg):\n try:\n statement, param_name, val = arg.parsed.raw.split(None, 2)\n val = val.strip()\n param_name = param_name.strip().lower()\n if param_name not in self.settable:\n hits = [p for p in self.settable if p.startswith(param_name)]\n if len(hits) == 1:\n param_name = hits[0]\n else:\n return self.do_show(param_name)\n current_val = getattr(self, param_name)\n if (val[0] == val[-1]) and val[0] in (\"'\", '\"'):\n val = val[1:-1]\n else:\n val = cast(current_val, val)\n setattr(self, param_name, val)\n self.poutput('%s - was: %s\\nnow: %s\\n' % (param_name, current_val, val))\n if current_val != val:\n try:\n onchange_hook = getattr(self, '_onchange_%s' % param_name)\n onchange_hook(old=current_val, new=val)\n except AttributeError:\n pass\n except (ValueError, AttributeError):\n self.do_show(arg)", "def get_last_set(self):\n return self.set", "def do_set(self, args):\n\t\tif len(args) == 0:\n\t\t\tself.parent.printErr(\"Missing argument(s)\")\n\t\t\treturn False\n\t\targs = args.split()\n\t\tftype = args[0]\n\t\tvalues = \" \".join(args[1:])\n\n\t\tif ftype not in self.SET_ARGUMENTS:\n\t\t\tself.parent.printErr(\"Invalid argument: %s\" % (ftype))\n\t\t\treturn False\n\t\tif values not in self.SET_ARGUMENTS[ftype]:\n\t\t\tself.parent.printErr(\"Invalid value for argument '%s': %s\" % (ftype, values))\n\t\t\treturn False\n\n\t\tself.parent.filter[ftype] = values\n\t\tself.apply_filter()\n\t\tself._update_prompts()", "def get_bookset(self): # pragma: no coverage\r\n raise NotImplementedError()", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def test_with_set_tuple(self):\n self.assertListEqual([5, 4, 3, 2, 1], switch_reverser((1, 2, 3, 4, 5)))\n self.assertListEqual([5, 4, 3, 2, 1], switch_reverser({1, 2, 3, 4, 5}))", "def _test_set_get(self):\n # Not allowed when the system is not yet configured\n self.assertRaises(NotAllowedError, self.p.setOffset, 2)\n self.p.setup(siteInfo={}, source=None, device=self.device)\n self.p.setOffset(2)\n self.assertEqual(self.p.getOffset(), 2)\n self.assertEqual(self.p.getPosition(), 2)\n self.p.clearOffset()\n self.assertEqual(self.p.getOffset(), 0)", "def make_io_set(self):\n pass", "def toggle(collection: set[_T], item: _T | None) -> set[_T]:\n\n if item is None:\n return collection\n\n if item in collection:\n return collection - {item}\n else:\n return collection | {item}", "def test_get_key_digest_with_integer_set(self, set_val):\n\n with pytest.raises(TypeError):\n self.as_connection.get_key_digest(\"test\", set_val,\n 
\"get_digest_key\")", "def testMultiSet3(self):\n data_store.DB.MultiSet(self.test_row,\n {\"aff4:size\": [1],\n \"aff4:stored\": [\"2\"]},\n token=self.token)\n\n data_store.DB.MultiSet(self.test_row, {\"aff4:stored\": [\"2\"]},\n to_delete=[\"aff4:size\"],\n token=self.token)\n\n # This should be gone now\n (stored, _) = data_store.DB.Resolve(self.test_row, \"aff4:size\",\n token=self.token)\n self.assertEqual(stored, None)\n\n (stored, _) = data_store.DB.Resolve(self.test_row, \"aff4:stored\",\n token=self.token)\n self.assertEqual(stored, \"2\")", "def AddGet_SetGet_test(type: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func = getattr(ed, f\"newProperty{type}\")\n get_func = getattr(ed, f\"getProperty{type}\")\n set_func = getattr(ed, f\"setProperty{type}\")\n add_func(\"a\", 1)\n assert get_func(\"a\") == 1\n assert set_func(\"a\", 2) == 1\n assert get_func(\"a\") == 2", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def arg_set(self):\n return frozenset(self._args)", "def testSimpleSet(self):\n self.client_connect()\n self.client_send('set simpleSet 0 0 1\\r\\n')\n self.client_send('1\\r\\n')\n self.mock_recv('set simpleSet 0 0 1\\r\\n1\\r\\n')\n self.mock_send('STORED\\r\\n')\n self.client_recv('STORED\\r\\n')", "def get(self, key):\n pass", "def get_set(css_class_name, set_num=0):\r\n if not root:\r\n return None\r\n item = root.xpath('//dl[@class=\"%s\"]/dd' % css_class_name)\r\n if len(item) <= set_num:\r\n return None\r\n sets_node = item[set_num]\r\n item_set = set([ut.unicodeanyway(node.text).replace('\\n', '')\r\n for node\r\n in sets_node.xpath('.//a') if node.text is not None])\r\n \r\n \r\n \r\n return item_set", "def __ror__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__ror__', other)", "def __setitem__(self, key, val):\n self.set[key] = val", "def get_result(self, collector_set):\n\t\traise NotImplementedError", "def test_value_set_with_added_packages_retrieves_resource(self):\n r1 = self._valueset_cls()\n r1.url.value = 'r1'\n\n r2 = self._valueset_cls()\n r2.url.value = 'r2'\n\n package = fhir_package.FhirPackage(\n ig_info=fhir_package.IgInfo(\n name='name',\n version='version',\n description='description',\n canonical='url',\n ),\n structure_definitions=mock_resource_collection_containing([]),\n search_parameters=mock_resource_collection_containing([]),\n code_systems=mock_resource_collection_containing([]),\n value_sets=mock_resource_collection_containing([r1, r2]),\n )\n self.assertEqual(package.get_value_set('r1'), r1)\n self.assertEqual(package.get_value_set('r2'), r2)\n self.assertIsNone(package.get_value_set('mystery-url'))", "def _validate_set(val):\n if not isinstance(val, set):\n raise ValueError(\"Passed value {} is not a set\".format(val))\n if not all([isinstance(char, str) for char in val]):\n raise ValueError(\"Passed overrides of non-string to overrides\")\n return val", "def SetOperator(self, op):\n return _hypre.HypreEuclid_SetOperator(self, op)", "def f_get(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def _handler_command_set(self, *args, **kwargs):\n next_state = None\n result = None\n\n # Retrieve required parameter.\n # Raise if no parameter provided, or not a dict.\n try:\n params = args[0]\n \n except IndexError:\n raise InstrumentParameterException('Set command requires a parameter dict.')\n\n if not isinstance(params, dict):\n raise InstrumentParameterException('Set 
parameters not a dict.')\n \n # For each key, val in the dict, issue set command to device.\n # Raise if the command not understood.\n else:\n \n for (key, val) in params.iteritems():\n result = self._do_cmd_resp('set', key, val, **kwargs)\n self._update_params()\n \n return (next_state, result)", "def test_get_multi_argument(self):\n models.storage.close()\n models.storage = models.engine.db_storage.DBStorage()\n models.storage.reload()\n obj = self.populate()\n with self.assertRaises(TypeError):\n models.storage.get(type(obj[0]), obj[0].id, obj[1].id)" ]
[ "0.7555395", "0.7233269", "0.67569464", "0.65880513", "0.6474774", "0.6362361", "0.6110508", "0.6036238", "0.59842014", "0.5927305", "0.5922558", "0.58916026", "0.58916026", "0.58498436", "0.584614", "0.5840852", "0.5804425", "0.57713145", "0.5760654", "0.57327175", "0.57290214", "0.5722558", "0.5678018", "0.5669476", "0.56692034", "0.5665817", "0.5631979", "0.5596204", "0.5595003", "0.5586199", "0.5552465", "0.55212104", "0.5499675", "0.5496104", "0.5472697", "0.5471182", "0.5466176", "0.54300845", "0.5427432", "0.54267836", "0.5407505", "0.5407505", "0.5379881", "0.5375893", "0.5375692", "0.53743094", "0.5374026", "0.5364701", "0.53622794", "0.53529644", "0.5350011", "0.53392917", "0.53311723", "0.5322243", "0.53172415", "0.53087413", "0.52942175", "0.52942175", "0.52938443", "0.5287397", "0.528454", "0.5257125", "0.5247168", "0.5247168", "0.5240674", "0.5228871", "0.5223816", "0.521911", "0.5208863", "0.5202922", "0.51844245", "0.5175629", "0.5174665", "0.51723987", "0.51723987", "0.51723987", "0.51723987", "0.51723987", "0.51723987", "0.5166298", "0.51395", "0.5104123", "0.5094166", "0.50874543", "0.5084881", "0.5079765", "0.5075214", "0.5075214", "0.50737005", "0.5063548", "0.50589997", "0.5052586", "0.5051456", "0.50449383", "0.5034381", "0.5033393", "0.50293136", "0.50291777", "0.5023271", "0.5021903", "0.50188875" ]
0.0
-1
collect docker logs from servers $ command is $ log_collector.py
def main():
    global tar_file_descr
    help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'
    hosts = []
    if len(sys.argv) == 2:
        if '-h' == sys.argv[1] or '--help' == sys.argv[1]:
            print(help_msg)
            sys.exit(0)
        elif 'all' == sys.argv[1]:
            # get logs from all hosts
            hosts = []
            host_objs = CLIENT.host_get_all()
            for host_obj in host_objs:
                hosts.append(host_obj.name)
        else:
            # get logs from specified hosts
            hostnames = sys.argv[1].split(',')
            for host in hostnames:
                if host not in hosts:
                    hosts.append(host)
    else:
        print(help_msg)
        sys.exit(1)

    # open tar file for storing logs
    fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_', suffix='.tgz')
    os.close(fd)  # avoid fd leak
    with tarfile.open(tar_path, 'w:gz') as tar_file_descr:
        # clear out old logs
        if os.path.exists(LOGDIR):
            shutil.rmtree(LOGDIR)
        os.mkdir(LOGDIR)
        # gather logs from selected hosts
        try:
            for host in hosts:
                get_logs_from_host(host)
            # tar up all the container logs
            tar_file_descr.add(LOGDIR, arcname='container_logs')
        finally:
            # remove uncompressed logs
            if os.path.exists(LOGDIR):
                shutil.rmtree(LOGDIR)
        # gather dump output from kolla-cli
        dump_kolla_info()
    print('Log collection complete. Logs are at %s' % tar_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def do_logs(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['stdout'] = args.stdout\n opts['stderr'] = args.stderr\n opts['since'] = args.since\n opts['timestamps'] = args.timestamps\n opts['tail'] = args.tail\n opts = zun_utils.remove_null_parms(**opts)\n logs = cs.containers.logs(**opts)\n print(logs)", "def tail(name):\n\n try:\n container = CLIENT.containers.get(name)\n for line in container.logs(stream=True):\n click.secho(line.strip(), bg='blue', fg='white')\n except docker.errors.NotFound as err:\n print(err)", "def logs(self, container: Container) -> str:", "def stream_container_logs(container: Container) -> None:\n logs = container.logs(stream=True, follow=True)\n for log in logs:\n for line in log.splitlines():\n print(f'[Container {container.id[:5]}] {line.decode()}')", "def on_server_start(self):\n self._container = self._docker_client.containers.run(self.docker_image_name, detach=True, **self.docker_params)\n self.signal_ready()\n\n for log_line in self.get_lines():\n try:\n alert_dict = self.parse_line(log_line)\n if alert_dict:\n self.add_alert_to_queue(alert_dict)\n except Exception:\n self.logger.exception(None)", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def logging(containerids,filename):\n click.echo('*** LOGS CONSOLIDATION IS INITIATED')\n for x in containerids:\n click.echo(\"Got Logs for Container:\"+str(x))\n base = \"http://127.0.0.1:2376\"\n url = \"/containers/%s/logs?stderr=1&stdout=1&tail=100&stream=0\" % (str(x))\n try:\n resp = requests.get( base + url)\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n click.echo(message)\n with click.open_file(filename, 'a+') as f:\n f.write(\"\\nContainerID(%s): \\n\" %x)\n for line in resp:\n f.write(str(line)+\"\\n\")", "def logs(name):\n\n try:\n container = CLIENT.containers.get(name)\n click.secho(str(container.logs()), bg='blue', fg='white')\n except docker.errors.NotFound as err:\n print(err)", "def collect_logs_from_pod(namespace, pod, logs_dir, k8s_cli):\n containers = get_list_of_containers_from_pod(namespace, pod, k8s_cli)\n init_containers = get_list_of_init_containers_from_pod(namespace, pod, k8s_cli)\n containers.extend(init_containers)\n if containers is None:\n logger.warning(\"Namespace '%s' Could not get containers for pod: %s list - \"\n \"skipping pods logs collection\", namespace, pod)\n return\n for container in containers:\n cmd = \"{} logs -c {} -n {} {}\" \\\n .format(k8s_cli, container, namespace, pod)\n with open(os.path.join(logs_dir, \"{}-{}.log\".format(pod, container)),\n \"w+\", encoding='UTF-8') as file_handle:\n _, output = run_shell_command(cmd)\n file_handle.write(output)\n\n # operator and admission containers restart after changing the operator-environment-configmap\n # getting the logs of the containers before the restart can help us with debugging potential bugs\n get_logs_before_restart_cmd = \"{} logs -c {} -n {} {} -p\" \\\n .format(k8s_cli, container, namespace, pod)\n err_code, output = run_shell_command(get_logs_before_restart_cmd)\n container_log_before_restart_file = os.path.join(logs_dir,\n '{}-{}-instance-before-restart.log'.format(pod, container))\n if err_code == 0: # Previous container instance found; did restart.\n with open(container_log_before_restart_file, \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(output)\n\n 
logger.info(\"Namespace '%s': + %s-%s\", namespace, pod, container)", "def _dump_docker_log(container_name: str, dir: Path) -> None:\n destination = dir / f\"{container_name}.log\"\n with open(destination, \"wb\") as out_stream:\n popen = subprocess.Popen(\n [\n \"docker\",\n \"logs\",\n \"--timestamps\",\n container_name,\n ],\n stdout=out_stream,\n )\n popen.wait()", "def main():\n lines = read_syslog()\n if len(sys.argv) > 1:\n lines = filter_logs(sys.argv[1], lines)\n for line in lines:\n print(line)", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def getLogs():", "def getLogs():", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def mix():\n\n with open(\"output.log\", 'w') as outfile:\n log_file = [container.logs(timestamps=True).split(\",\") for container in\n CLIENT.containers.list()]\n for c_log in log_file:\n outfile.write(\" \".join(map(str, c_log)) + '\\n')\n click.secho('Log output of each container has been written to output.log.',\n bg='blue', fg='white')", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def logs(self, data):\n required = {'token', 'container_id'}\n api.validate(data, required)\n token = data['token']\n container_id = data['container_id']\n self.credentials_module.authorize_container(token,\n container_id)\n results = self.docker_module.logs_container(container_id)\n return results", "def getDockerOutput(imgName, command, client):\n cont = None\n try:\n cont = client.containers.create(image=imgName, command=command)\n cont.start()\n ret_code = cont.wait()\n if isinstance(ret_code, dict):\n ret_code = ret_code['StatusCode']\n logs = cont.logs(stdout=True, stderr=False, stream=False)\n cont.remove()\n except Exception as err:\n if cont:\n try:\n cont.remove()\n except Exception:\n pass\n logger.exception(\n 'Attempt to docker run %s %s failed', imgName, command)\n raise DockerImageError(\n 'Attempt to docker run %s %s failed ' % (\n imgName, command) + str(err), imgName)\n if ret_code != 0:\n raise DockerImageError(\n 'Attempt to docker run %s %s failed' % (imgName, command), imgName)\n return logs", "def collect_tcpdump(self, log_dir, count=10, timeout=30):\n log_type = \"tcpdump\"\n log_name = \"tcpdump.txt\"\n cmd = \"tcpdump -c {} > /tmp/{}\".format(count, log_name)\n\n self._collect_log(log_type, log_dir, log_name, cmd,\n timeout=timeout, background=False)", "def container_logs(ctx, token, container_id):\n try:\n out = ctx.obj.container_logs(token, container_id)\n print_message(out)\n except BaseException:\n m = (\"Error: No container related to %s\" %\n container_id)\n print_error(m)", "def collect_k8s_logs(cfg: ElasticBlastConfig):\n dry_run = cfg.cluster.dry_run\n k8s_ctx = cfg.appstate.k8s_ctx\n if not k8s_ctx:\n raise RuntimeError(f'kubernetes context is missing for {cfg.cluster.name}')\n # TODO use named constants for labels and containers\n # also modify corresponding YAML templates and their substitution\n get_logs(k8s_ctx, 'app=setup', [K8S_JOB_GET_BLASTDB, K8S_JOB_IMPORT_QUERY_BATCHES, K8S_JOB_SUBMIT_JOBS], dry_run)\n get_logs(k8s_ctx, 'app=blast', [K8S_JOB_BLAST, K8S_JOB_RESULTS_EXPORT], dry_run)", "def cli_copy_pcc_logs(host_ip:str, linux_user:str, linux_password:str)->dict:\n try:\n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp pccserver:/home/logs/ /tmp\"\n cli_run(host_ip, 
linux_user, linux_password, cmd)\n os.makedirs(\"output/pccserver_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/pccserver_logs/ansible.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/pccserver_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/pccserver_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/pccserver_logs/error.log\")\n cmd = \"sudo rm -rf /home/ceph/; sudo docker cp pccserver:/home/jobs/ceph /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/pccserver_logs/ceph\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/ceph/cluster/\",\"output/pccserver_logs/ceph/\")\n \n cmd = \"sudo rm -rf /tmp/logs; sudo docker cp platina-executor:/home/logs/ /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd)\n os.makedirs(\"output/platina_executor_logs\", exist_ok=True)\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/ansible.log\", \"output/platina_executor_logs/ansible.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/default.log\", \"output/platina_executor_logs/default.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/detailed.log\", \"output/platina_executor_logs/detailed.log\")\n cli_copy_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/logs/error.log\", \"output/platina_executor_logs/error.log\")\n cmd = \"sudo rm -rf /home/kubernetes/; sudo docker cp platina-executor:/home/jobs/kubernetes /tmp\"\n cli_run(host_ip, linux_user, linux_password, cmd) \n os.makedirs(\"output/platina_executor_logs/kubernetes\", exist_ok=True)\n cli_copy_folder_from_remote_to_local(host_ip, linux_user, linux_password, \"/tmp/kubernetes/cluster/\",\"output/platina_executor_logs/kubernetes/\")\n \n cmd = \"sudo rm -rf /output/logs\"\n os.system(cmd) \n \n return \"OK\"\n except Exception as e:\n return {\"Error\": str(e)}", "def cmd_logs(args):\n\n remote.show_log(_get_current_project_name(), num=args.num, tail=args.tail)", "def get_docker_logs(container_name):\n p = subprocess.run(\n [\"docker\", \"logs\", container_name],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n return p.stdout.decode()", "def CollectLogs(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[str, None]\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"collectLogs\", payload=payload, response_object=None)", "def docker_worker():", "def run(self):\n \n from subprocess import PIPE, Popen\n from shlex import split\n tag = 0\n old_r = ''\n location = '172.20.127.233'\n while 1:\n cmd = 'tcpdump -v -i eth1' \n p = Popen(split(cmd),stdout=PIPE) \n syslog_dict = {}\n for row in p.stdout:\n r = row\n if ('syslog' in r):\n tag = 1\n segment = old_r\n segment = segment + r\n elif tag == 1:\n tag = 2\n segment = segment + r\n elif tag == 2:\n tag = 0\n segment = segment + r\n tm = datetime.now().isoformat()\n name = '172.20.127.233'+':'+str(tm)\n type = 'syslog'\n 
syslog_dict[name]={'object-name':name,'object-type':type,'object-location':location,'location-type':'network','message-content':segment,'timestamp':datetime.now()}\n self.updates_and_deletes(syslog_dict)\n else:\n old_r =r\n #except KeyboardInterrupt:\n # p.terminate()\n\t\t\t######################\n # perform collection #\n # update and delete #\n ##################### \n # call super's function to perform updating and deleting\n #self.updates_and_deletes(parking_dict)\n #######################\n # sleep for some time #\n #######################\n #time.sleep(REFRESH_RATE)\n #time.sleep(sleep_time)", "def show_logs_for_running_containers(services, tail):\n if not check_for_docker_compose_file():\n log.info('No running containers found')\n sys.exit(1)\n\n try:\n if tail:\n run_docker_compose_command(['logs', '-f'] + services)\n else:\n run_docker_compose_command(['logs'] + services)\n except KeyboardInterrupt:\n sys.exit(0)", "def get_logs(k8s_ctx: str, label: str, containers: List[str], dry_run: bool = False):\n for c in containers:\n cmd = f'kubectl --context={k8s_ctx} logs -l {label} -c {c} --timestamps --since=24h --tail=-1'\n if dry_run:\n logging.info(cmd)\n else:\n try:\n # kubectl logs command can fail if the pod/container is gone, so we suppress error.\n # We can't combine it into one try-except-finally, because safe_exec should report\n # the command used in DEBUG level using old format with timestamps. New bare format\n # is used only after successful invocation of kubectl logs.\n proc = safe_exec(cmd)\n try:\n # Temporarily modify format for logging because we import true timestamps\n # from Kubernetes and don't need logging timestamps, so we just copy logs\n # verbatim.\n root_logger = logging.getLogger()\n orig_formatter = root_logger.handlers[0].formatter\n root_logger.handlers[0].setFormatter(logging.Formatter(fmt='%(message)s'))\n for line in proc.stdout.decode().split('\\n'):\n if line:\n logging.info(line)\n finally:\n # Ensure logging is restored to previous format\n # type is ignored because orig_formatter can be None\n # and there does not seem to be any other way to get\n # the original formatter from root logger\n root_logger.handlers[0].setFormatter(orig_formatter) # type: ignore\n except SafeExecError:\n pass", "def pull_dlc_images(images):\n for image in images:\n run(f\"docker pull {image}\", hide=\"out\")", "def print_logs(self, shell=False):\n for l, v in self.logs(shell).items():\n print('\\n### Container ', l, ', id ', v.get('id', 'None'), ' ###\\n')\n for part in ['stdout', 'stderr']:\n print('##', part, '##')\n print(v[part])", "def init():\n\n @click.command()\n @click.option('--cell',\n callback=cli.handle_context_opt,\n envvar='TREADMILL_CELL',\n expose_value=False,\n required=True)\n @click.argument('app-or-svc')\n @click.option('--host',\n help='Hostname where to look for the logs',\n required=True)\n @click.option('--uniq',\n help='The container uniq id',\n required=False)\n @click.option('--service',\n help='The name of the service for which the logs are '\n 'to be retreived',\n required=False)\n def logs(app_or_svc, host, uniq, service):\n \"\"\"View application's service logs.\"\"\"\n try:\n app, uniq, logtype, logname = app_or_svc.split('/', 3)\n except ValueError:\n app, uniq, logtype, logname = app_or_svc, uniq, 'service', service\n\n if any(param is None for param in [app, uniq, logtype, logname]):\n cli.bad_exit('Incomplete parameter list')\n\n _host, port = _nodeinfo_endpoint(host)\n\n api = 'http://{0}:{1}'.format(host, port)\n logurl = 
'/local-app/%s/%s/%s/%s' % (\n urllib_parse.quote(app),\n urllib_parse.quote(uniq),\n logtype,\n urllib_parse.quote(logname)\n )\n\n log = restclient.get(api, logurl)\n click.echo(log.text)\n\n return logs", "def collect_pods_logs(namespace, output_dir, k8s_cli, logs_from_all_pods=False):\n logger.info(\"Namespace '%s': Collecting pods' logs:\", namespace)\n logs_dir = os.path.join(output_dir, \"pods\")\n\n if logs_from_all_pods:\n pods = get_pod_names(namespace, k8s_cli)\n else:\n pods = []\n for selector in [\"app=redis-enterprise\", \"name=redis-enterprise-operator\"]:\n pods.extend(get_pod_names(namespace, k8s_cli, selector))\n\n if not pods:\n logger.warning(\"Namespace '%s' Could not get pods list - \"\n \"skipping pods logs collection\", namespace)\n return\n\n make_dir(logs_dir)\n\n for pod in pods:\n collect_logs_from_pod(namespace, pod, logs_dir, k8s_cli)", "def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def cli_truncate_pcc_logs(host_ip:str, linux_user:str, linux_password:str)->dict:\n try:\n \n cmd_remove_logs = \"sudo docker exec pccserver sh -c 'rm logs/*.log*';sudo docker exec platina-executor sh -c 'rm logs/*.log*'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_logs)\n\n cmd_remove_archive = \"sudo docker exec pccserver sh -c 'rm -rf logs/archive';sudo docker exec platina-executor sh -c 'rm -rf logs/archive'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_archive)\n\n cmd_remove_ansible_backup = \"sudo docker exec pccserver sh -c 'rm -rf logs/ansible-backup-logs';sudo docker exec platina-executor sh -c 'rm -rf logs/ansible-backup-logs'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_ansible_backup)\n\n cmd_remove_k8s_logs=\"sudo docker exec platina-executor sh -c 'rm -r /home/jobs/kubernetes/cluster/*'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_k8s_logs)\n \n cmd_remove_ceph_logs=\"sudo docker exec pccserver sh -c 'rm -r /home/jobs/ceph/cluster/*'\"\n cli_run(host_ip, linux_user, linux_password, cmd_remove_ceph_logs)\n\n cmd_truncate_logs = \"sudo docker exec pccserver sh -c 'truncate -s 0 logs/*.log';sudo docker exec platina-executor sh -c 'truncate -s 0 logs/*.log'\"\n return cli_run(host_ip, linux_user, linux_password, cmd_truncate_logs) \n \n except Exception as e:\n return {\"Error\": str(e)}", "def _collect_log(self, log_type, log_dir, log_name, cmd,\n timeout=10, background=True):\n log_tag = self.get_log_tag()\n target_log = posixpath.join('/tmp/', log_name)\n self.logger.info(\"{} Attempting to collect a {} log\".format(\n log_tag, log_type))\n status, _, _ = self.exec_command_ssh(cmd, background=background)\n\n if status != 0:\n raise Exception(\"{} '{}' command did not generate a log \"\n \"on the target\".format(log_tag, cmd))\n\n # Wait for the system to finish writing the log\n time.sleep(timeout)\n self.logger.info(\"{} Attempting to copy generated log from \"\n \"the target to the PC\".format(log_tag))\n dest = os.path.join(log_dir, log_name)\n with self.ssh_client as ssh_client:\n with ssh_client.open_sftp() as sftp:\n sftp.get(target_log, dest)\n\n self.logger.info(\"{} Attempting to delete log from \"\n \"target\".format(log_tag))\n status, _, _ = self.exec_command_ssh(\n \"rm /tmp/{}\".format(log_name))\n\n if status != 0:\n self.logger.error(\"{} Failed to delete log from \"\n 
\"target\".format(log_tag))\n\n self.logger.info(\"{} Log collection complete!\".format(log_tag))", "def GetLogs(self):\n stdout, _, _ = RunKubectlCommand(['logs', self.name])\n return stdout", "def collect_slog2info(self, log_dir):\n log_type = \"slog2info\"\n log_name = \"treerunner_slog2info.txt\"\n cmd = \"slog2info > /tmp/{}\".format(log_name)\n\n self._collect_log(log_type, log_dir, log_name, cmd)", "def log_cmd(cmd, cnt, args):\n if cnt > 1:\n log_file = args[1]\n start_logging(log_file)\n else:\n stop_logging()", "def main(logger):\n logger.info('Snapshot Reaper starting')\n keep_running = True\n while keep_running:\n logger.info(\"Connecting to vCenter {} as {}\".format(const.INF_VCENTER_SERVER, const.INF_VCENTER_USER))\n with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER,\n password=const.INF_VCENTER_PASSWORD) as vcenter:\n try:\n start_loop = time.time()\n reap_snapshots(vcenter, logger)\n except Exception as doh:\n logger.exception(doh)\n keep_running = False\n else:\n ran_for = int(time.time() - start_loop)\n logger.debug('Took {} seconds to check all snapshots'.format(ran_for))\n loop_delta = LOOP_INTERVAL - ran_for\n sleep_for = max(0, loop_delta)\n time.sleep(sleep_for)", "def cmd_line(argv):\n parser = argparse.ArgumentParser(description='Manage logs')\n parser.add_argument('-p', '--print-with-response', default=None, help='Print messages with specified response.')\n parser.add_argument('-s', '--start-time', default='1943-01-01-00', help='Start time to filter.')\n parser.add_argument('-e', '--end-time', default='9999-01-01-00', help='end time to filter.')\n parser.add_argument('--send-logs', default=None, help='Send logs to host and directory.')\n parser.add_argument('--send-logs-user', default=getpass.getuser(), help='User to login to the remote server as.')\n parser.add_argument('--key-file', default=os.path.expanduser('~/.ssh/id_rsa'), help='Keyfile to use for login to remote server.')\n parser.add_argument('fileglob')\n\n args = parser.parse_args()\n return args", "def run(self):\n print('Starting CloudWatchLogsMonitor.')\n\n # Initialize pool for multithreading.\n # Use ThreadPool for shared memory (used for keeping track of last polled timestamp)\n pool = ThreadPool()\n\n while True:\n\n # Check for new LogGroups and LogStreams.\n self.update()\n\n for log_group in self.log_groups:\n # For every log group get and append log events to log file.\n # This is run in parallel and is non-blocking.\n pool.map_async(LogStream.get_and_append_log_events, log_group.log_streams)\n\n # These lines run the agent synchronously.\n # You need to comment out the pool.map_async line above if using synchronous loop.\n # for log_stream in log_group.log_streams:\n # LogStream.get_and_append_log_events(log_stream)\n\n # Sleep for the polling interval.\n time.sleep(self.default_polling_interval)", "def PullLogs(ssh, log_files, download_folder):\n for log_file in log_files:\n target_file = os.path.join(download_folder, os.path.basename(log_file))\n ssh.ScpPullFile(log_file, target_file)\n _DisplayPullResult(download_folder)", "def display_pipeline_log(ctx,\n pipeline_name,\n username,\n password,\n ip_address,\n interactive):\n slab_logger.info('Displaying %s log' % pipeline_name)\n if not username:\n username = ctx.get_username()\n if not password:\n password = ctx.get_password(interactive)\n if not password or not username:\n slab_logger.error(\"Username is %s and password is %s. 
\"\n \"Please, set the correct value for both and retry.\" %\n (username, password))\n sys.exit(1)\n stages_url = \"http://{0}/go/api/pipelines/{1}/stages.xml\"\n # Find latest run info\n res = requests.get(stages_url.format(ip_address, pipeline_name),\n auth=HTTPBasicAuth(username, password))\n soup = BeautifulSoup(res.content, \"html.parser\")\n try:\n latest_job_info_url = soup.findAll(\n 'entry')[0].findAll('link')[0]['href']\n except Exception as ex:\n slab_logger.error(\"Internal error occurred. Please, check arguments supplied.\")\n slab_logger.error(\"Error details : %s \" % (ex))\n sys.exit(1)\n\n # Find all the job info for that run\n latest_job_info_url = latest_job_info_url.replace(\"gocd_java_server\",\n ip_address)\n job_info_res = requests.get(latest_job_info_url,\n auth=HTTPBasicAuth(username, password))\n soup = BeautifulSoup(job_info_res.content, \"html.parser\")\n job_urls = soup.findAll('job')\n\n # for each of the job, pull the log and display the log\n for job_url in job_urls:\n job_url['href'] = job_url['href'].replace(\"gocd_java_server\",\n ip_address)\n job_url_res = requests.get(job_url['href'],\n auth=HTTPBasicAuth(username, password))\n soup = BeautifulSoup(job_url_res.content, \"html.parser\")\n log_url = soup.find('artifacts')['baseuri']\n log_url = log_url.replace(\"gocd_java_server\", ip_address)\n log_url_res = requests.get(log_url + \"/cruise-output/console.log\",\n auth=HTTPBasicAuth(username, password))\n soup = BeautifulSoup(log_url_res.content, \"html.parser\")\n print \"\\n\\n-------------------Printing job log for pipeline : \", \\\n log_url, \"-------------------------\"\n print soup\n print \"\\n\\n-------------------End of job log for pipeline : \", \\\n log_url, \"-------------------------\"", "def add_docker():\n pass\n # can i query docker for the location of the docker for the location of the\n # socket?\n # `docker info` will return \"Docker Root Dir: /var/lib/docker\"\n # check CoreOs project for detail on nvidia-docker socket", "def collect_all_logs(self, log_dir):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Gathering the list of log collections \"\n \"supported\".format(log_tag))\n\n collect_methods = [method_name for method_name in dir(self)\n if callable(getattr(self, method_name))\n and \"collect_\" in method_name and\n \"_collect\" not in method_name and\n \"collect_all_logs\" not in method_name]\n\n for method in collect_methods:\n self.logger.info(\"{} About to {}\".format(\n log_tag, method))\n collect_method = getattr(\n self, method, lambda: \"Invalid log collection method\")\n collect_method(log_dir=log_dir)", "def logs(app_or_svc, host, uniq, service):\n try:\n app, uniq, logtype, logname = app_or_svc.split('/', 3)\n except ValueError:\n app, uniq, logtype, logname = app_or_svc, uniq, 'service', service\n\n if any(param is None for param in [app, uniq, logtype, logname]):\n cli.bad_exit('Incomplete parameter list')\n\n _host, port = _nodeinfo_endpoint(host)\n\n api = 'http://{0}:{1}'.format(host, port)\n logurl = '/local-app/%s/%s/%s/%s' % (\n urllib_parse.quote(app),\n urllib_parse.quote(uniq),\n logtype,\n urllib_parse.quote(logname)\n )\n\n log = restclient.get(api, logurl)\n click.echo(log.text)", "def manage_logs(stream, component, mip_convert_config_dir,\n cylc_task_cycle_point):\n logger = logging.getLogger(__name__)\n setup_logger(logger)\n logger.info('Managing logs')\n # suffix is a variable used in the suite.rc that is a useful shorthand.\n suffix = '_'.join([stream, component])\n dir_stem = 
os.path.join(mip_convert_config_dir, 'log',\n suffix, cylc_task_cycle_point)\n work = [('cmor_logs', 'cmor*.log'),\n ('mip_convert_cfgs', 'mip_convert.*.cfg'),\n ('mip_convert_logs', 'mip_convert*.log')]\n for dir_name, file_pattern in work:\n destination = os.path.join(dir_stem, dir_name)\n if os.path.exists(destination):\n if os.path.isdir(destination):\n logger.info('Log directory \"{}\" already exists.'\n ''.format(destination))\n else:\n raise RuntimeError('Expected \"{}\" to be a directory.'\n ''.format(destination))\n else:\n logger.info('Making log directory \"{}\"'.format(destination))\n os.makedirs(destination, LOG_DIRECTORY_PERMISSIONS)\n os.chmod(destination, LOG_DIRECTORY_PERMISSIONS)\n # Note that we are already in the working directory where MIP Convert\n # is run and as such all the log files are in the current working\n # directory.\n files_to_archive = glob.glob(file_pattern)\n for file_to_archive in files_to_archive:\n return_code = subprocess.call(['gzip', file_to_archive])\n if return_code > 0:\n logger.warning('Failed to gzip \"{}\".'.format(file_to_archive))\n else:\n file_to_archive = '{}.gz'.format(file_to_archive)\n dest_file_name = os.path.join(destination, file_to_archive)\n if os.path.exists(dest_file_name):\n continue\n logger.info('Archiving \"{}\" to \"{}.gz\"'.format(files_to_archive,\n dest_file_name))\n shutil.copy(file_to_archive, dest_file_name)", "def watch():\n\n try:\n headers = ('CONTAINER ID', 'NAME', 'CPU %', 'MEM USAGE / LIMIT',\n 'MEM %', 'NET I/O', 'BLOCK I/O', 'PIDS')\n column_width = 20\n for element in headers:\n print(element.ljust(column_width)),\n print('')\n\n for container in CLIENT.containers.list():\n column_width = 20\n stats = container.stats(stream=False)\n\n # Block I/O stats\n blkio = stats.get('blkio_stats').get('io_service_bytes_recursive')\n # in case blkio is empty --> IndexError: list index out of range\n if not blkio:\n blkio_read = '0'\n blkio_write = '0'\n else:\n blkio_read = size(blkio[0].get('value'), system=si)\n blkio_write = size(blkio[1].get('value'), system=si)\n\n # Network stats\n rx_stats = size(stats.get('networks').get('eth0').get('rx_bytes'), system=si)\n tx_stats = size(stats.get('networks').get('eth0').get('tx_bytes'), system=si)\n\n # Memory stats\n mem = stats.get('memory_stats')\n mem_usage = mem.get('stats').get('active_anon')\n mem_limit = mem.get('limit')\n mem_percent = (\"%.2f\"%((mem_usage / mem_limit)*100))\n\n # CPU stats\n # this is taken directly from docker CLIENT:\n # https://github.com/docker/docker/blob/28a7577a029780e4533faf3d057ec9f6c7a10948/api/CLIENT/stats.go#L309\n cpu_percent = 0.0\n cpu = stats.get('cpu_stats')\n pre_cpu = stats.get('precpu_stats')\n cpu_total = cpu.get('cpu_usage').get('total_usage')\n pre_cpu_total = pre_cpu.get('cpu_usage').get('total_usage')\n cpu_count = cpu.get('online_cpus')\n\n cpu_delta = cpu_total - pre_cpu_total\n system_delta = cpu.get('system_cpu_usage') - pre_cpu.get('system_cpu_usage')\n\n if system_delta > 0.0 and cpu_delta > 0.0:\n cpu_percent = (\"%.2f\"%(cpu_delta / system_delta * 100.0 * cpu_count))\n\n # container attributes\n attrs = [(str(container.short_id), str(container.name), str(cpu_percent),\n str(size((mem_usage), system=si) + \" / \" + size((mem_limit), system=si)),\n str(mem_percent), str(rx_stats + \" / \" + tx_stats),\n str(blkio_read + \" / \" + blkio_write),\n str(stats.get('pids_stats').get('current')))]\n\n for row in attrs:\n for element in row:\n print(element.ljust(column_width)),\n print('')\n\n except 
(docker.errors.NotFound, KeyError, AttributeError):\n print('No such container or container not running!')", "def main():\n logging.basicConfig() # create a baseconfiguration s.t. we cann now log \n cycle = 0\n while True:\n\n logging.info(f\"{time.now()} - Start cycle {cycle}\") # changed from print to info \n do_unstable_magick(cycle)\n logging.info(f\"{time.nos()} - Finished cycle {cycle}\")", "def query_logs_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:\n query = args.get('query', '')\n limit = args.get('limit', '')\n transform_results = argToBoolean(args.get('transform_results', 'true'))\n\n if 'limit' not in query.lower():\n query += f' LIMIT {limit}'\n\n records, raw_results = client.query_loggings(query)\n\n table_name = get_table_name(query)\n output_results = records if not transform_results else [common_context_transformer(record) for record in records]\n human_readable = tableToMarkdown('Logs ' + table_name + ' table', output_results, removeNull=True)\n ec = {\n 'CDL.Logging': output_results\n }\n return human_readable, ec, raw_results", "def execute_to_log(cmd, logfile, timeout=-1,\n watch_logs=[\n ('[syslog]', '/var/log/syslog'),\n ('[sqlslo]', '/var/log/mysql/slow-queries.log'),\n ('[sqlerr]', '/var/log/mysql/error.log')\n ],\n heartbeat=True, env=None, cwd=None\n ):\n\n if not os.path.isdir(os.path.dirname(logfile)):\n os.makedirs(os.path.dirname(logfile))\n\n logger = logging.getLogger(logfile)\n log_handler = logging.FileHandler(logfile)\n log_formatter = logging.Formatter('%(asctime)s %(message)s')\n log_handler.setFormatter(log_formatter)\n logger.addHandler(log_handler)\n\n descriptors = {}\n\n for watch_file in watch_logs:\n if not os.path.exists(watch_file[1]):\n logger.warning('Failed to monitor log file %s: file not found'\n % watch_file[1])\n continue\n\n try:\n fd = os.open(watch_file[1], os.O_RDONLY)\n os.lseek(fd, 0, os.SEEK_END)\n descriptors[fd] = {'name': watch_file[0],\n 'poll': select.POLLIN,\n 'lines': ''}\n except Exception as e:\n logger.warning('Failed to monitor log file %s: %s'\n % (watch_file[1], e))\n\n cmd += ' 2>&1'\n start_time = time.time()\n p = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env, cwd=cwd)\n\n descriptors[p.stdout.fileno()] = dict(\n name='[output]',\n poll=(select.POLLIN | select.POLLHUP),\n lines=''\n )\n\n poll_obj = select.poll()\n for fd, descriptor in descriptors.items():\n poll_obj.register(fd, descriptor['poll'])\n\n last_heartbeat = time.time()\n\n def process(fd):\n \"\"\" Write the fd to log \"\"\"\n global last_heartbeat\n descriptors[fd]['lines'] += os.read(fd, 1024 * 1024)\n # Avoid partial lines by only processing input with breaks\n if descriptors[fd]['lines'].find('\\n') != -1:\n elems = descriptors[fd]['lines'].split('\\n')\n # Take all but the partial line\n for l in elems[:-1]:\n if len(l) > 0:\n l = '%s %s' % (descriptors[fd]['name'], l)\n logger.info(l)\n last_heartbeat = time.time()\n # Place the partial line back into lines to be processed\n descriptors[fd]['lines'] = elems[-1]\n\n while p.poll() is None:\n if timeout > 0 and time.time() - start_time > timeout:\n # Append to logfile\n logger.info(\"[timeout]\")\n os.kill(p.pid, 9)\n\n for fd, flag in poll_obj.poll(0):\n process(fd)\n\n if time.time() - last_heartbeat > 30:\n # Append to logfile\n logger.info(\"[heartbeat]\")\n last_heartbeat = time.time()\n\n # Do one last write to get the remaining lines\n for fd, flag in poll_obj.poll(0):\n process(fd)\n\n # 
Clean up\n for fd, descriptor in descriptors.items():\n poll_obj.unregister(fd)\n os.close(fd)\n try:\n p.kill()\n except OSError:\n pass\n\n logger.info('[script exit code = %d]' % p.returncode)\n logger.removeHandler(log_handler)\n log_handler.flush()\n log_handler.close()\n return p.returncode", "def collect_hogs(self, log_dir, iterations=1, timeout=30):\n log_type = \"hogs\"\n log_name = \"hogs.txt\"\n cmd = \"hogs -i {} > /tmp/{}\".format(\n str(iterations), log_name)\n\n self._collect_log(log_type, log_dir, log_name, cmd,\n timeout=timeout)", "def getContainerInitLog(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/initlog' % (node,vmid),None)\n return data", "def cat_cmd(server, client, line):\n if len(line.split(' ')) > 1 and line.split(' ')[1] == \"/proc/mounts\":\n path = os.path.dirname(os.path.realpath(__file__))\n path = path[:-7] # shaves off /engine\n with open(\"{}/fakefiles/proc%mounts\".format(path), \"r\") as f:\n response = f.read()\n client.exit_status = 0\n else:\n response = client.run_in_container(line)\n client.send(response)", "def setup_logging():\n logging.basicConfig(\n filename=os.getenv(\"SERVICE_LOG\", \"server.log\"),\n level=logging.DEBUG,\n format=\"%(levelname)s: %(asctime)s pid:%(process)s module:%(module)s %(message)s\",\n datefmt=\"%d/%m/%y %H:%M:%S\",\n )", "def collect_log_output(activity_log, result):\n output = activity_log.get('emittedOutput')\n if output:\n result.append(output['_value'])\n else:\n subsections = activity_log.get('subsections')\n if subsections:\n for subsection in subsections['_values']:\n collect_log_output(subsection, result)", "def run(args):\n docker(' '.join(args))", "def logcat(self, limit=1000):\n result, counter = [], 0\n\n output = self.android_device_driver.adb.raw_cmd(\"logcat\")\n for stdout_line in iter(output.stdout.readline, \"\"):\n counter += 1\n result.append(stdout_line)\n if counter == limit:\n break\n output.stdout.close()\n return result", "def collect_logs(self):\n logs = glob.glob(f\"{self.production.rundir}/*.err\") #+ glob.glob(f\"{self.production.rundir}/*/logs/*\")\n logs += glob.glob(f\"{self.production.rundir}/*.out\")\n messages = {}\n for log in logs:\n with open(log, \"r\") as log_f:\n message = log_f.read()\n messages[log.split(\"/\")[-1]] = message\n return messages", "def stats(containerids, stream):\n click.echo('*** MONITORING IS INITIATED')\n if(stream):\n while True:\n for x in containerids:\n index = slice(12)\n monitoring(containerid=x[index])\n else:\n for x in containerids:\n index = slice(12)\n monitoring(containerid=x[index])", "def dinghy_get_pod_logs(req, resp):\n resp.content = api.template(\n 'pod_logs.html'\n )", "def pullall():\n\tprint(red('\\t\\tStarting download of QNIBTerminal images\\t\\t'))\n\t# pull all the needed images\n\tdocker_images={'fd20','terminal','helixdns','elk','slurm','compute'}\n\tfor image in docker_images:\n\t\tprint 'docker pull qnib/'+image\n\t\t# run('docker pull qnib/'+image)", "def log_output(stream):\n for chunk in stream:\n logger.debug(chunk)\n # error handling requires looking at the response body\n if '\"error\"' in chunk.lower():\n raise docker.errors.DockerException(chunk)", "def setup():\n # derive log file name from script name\n log_filename = '{}{:s}'.format(pathlib.Path(__file__).resolve().stem, '.log')\n\n # read command line arguments (https://docs.python.org/3/howto/argparse.html)\n argparser = argparse.ArgumentParser(description='Collects official SARS-CoV-2 infection statistics published by the city of Dresden.')\n 
arg_group_inputs = argparser.add_argument_group('input options', 'by default, the data is obtained online from the city\\'s official source, but other import options are also available')\n arg_group_timestamps = argparser.add_mutually_exclusive_group()\n arg_group_outputs = argparser.add_argument_group('output options', 'new data is saved in InfluxDB by default; this and other behaviour concerning data writing can be adjusted with these output options')\n arg_group_outputs.add_argument('-a', '--archive-json', help='archive JSON file each time new data is found or force-collected', action='store_true')\n argparser.add_argument('-c', '--force-collect', help='store JSON data, regardless of whether new data points have been found or not', action='store_true')\n arg_group_timestamps.add_argument('-d', '--date', help='set publishing date manually for the new data set, e. g. \\'2020-10-18T09:52:41Z\\'')\n arg_group_inputs.add_argument('-f', '--file', help='load JSON data from a local file instead from server; if no publishing date is passed with the \\'--date\\' or \\'--auto-date\\' option, an attempt is made to read the date from the filename', nargs='?', type=argparse.FileType('r'), const='query.json') # 'const' is used, if '--file' is passed without an argument; default=sys.stdin; https://stackoverflow.com/a/15301183/7192373\n arg_group_outputs.add_argument('-l', '--log', help='save log in file \\'{:s}\\''.format(log_filename), action='store_true')\n arg_group_outputs.add_argument('-n', '--no-cache', help='suppress the saving of a JSON cache file (helpful if you do not want to mess with an active cron job looking for changes)', action='store_true')\n arg_group_outputs.add_argument('-o', '--output-dir', help='set a user defined directory where data (cache, logs and JSONs) are stored; default: directory of this Python script', default=pathlib.Path(pathlib.Path(__file__).resolve().parent, OUTPUT_FOLDER)) # use absolute path of this Python folder as default directory\n arg_group_outputs.add_argument('-s', '--skip-influxdb', help='check for and write new JSON data only, do not write to InfluxDB', action='store_true')\n arg_group_timestamps.add_argument('-t', '--auto-date', help='do not try to to parse the publishing date from the filename, instead write current date (UTC) to database', action='store_true')\n arg_group_inputs.add_argument('-u', '--url', help='URL to be used to check for JSON updates; default: \\'arcgis\\'', choices=['arcgis', 'github'], default='arcgis', type=str.lower)\n argparser.add_argument('-v', '--verbose', help='print debug messages', action='store_true')\n\n global args\n args = argparser.parse_args()\n\n if args.verbose:\n log_level = logging.DEBUG\n else:\n log_level = logging.INFO\n\n # setup logging\n global logger\n logger = logging.getLogger()\n logger.setLevel(log_level)\n\n # log format\n logging_format = '[%(asctime)s] %(levelname)s %(message)s' # %(name)s.%(funcName)s %(pathname)s:\n log_formatter = logging.Formatter(logging_format) #, datefmt=\"%Y-%m-%dT%H:%M:%S\")\n\n # log to console\n handler = logging.StreamHandler()\n handler.setFormatter(log_formatter)\n logger.addHandler(handler)\n\n # get path for output\n global output_dir\n try:\n output_dir = pathlib.Path(args.output_dir)\n except TypeError:\n logger.error(f'Could not resolve output directory \\'{args.output_dir}\\'.')\n sys.exit()\n\n # log to file\n if args.log:\n handler = logging.handlers.RotatingFileHandler(pathlib.Path(output_dir, log_filename), maxBytes=2**20, backupCount=5) # 
https://stackoverflow.com/a/13733777/7192373; https://docs.python.org/3/library/logging.handlers.html#logging.handlers.RotatingFileHandler\n handler.setFormatter(log_formatter)\n logger.addHandler(handler)\n\n # setup DB connection\n if not args.skip_influxdb:\n global db_client\n db_client = InfluxDBClient(host='localhost', port=8086) # https://www.influxdata.com/blog/getting-started-python-influxdb/\n db_client.create_database(INFLUXDB_DATABASE)\n db_client.switch_database(INFLUXDB_DATABASE)", "def collect_pod_rs_logs(namespace, output_dir, k8s_cli, mode):\n rs_pod_logs_dir = os.path.join(output_dir, \"rs_pod_logs\")\n selector = get_selector(mode)\n rs_pod_names = get_pod_names(namespace=namespace, k8s_cli=k8s_cli, selector=selector)\n if not rs_pod_names:\n logger.warning(\"Namespace '%s' Could not get rs pods list - \"\n \"skipping rs pods logs collection\", namespace)\n return\n make_dir(rs_pod_logs_dir)\n # TODO restore usage of get_non_ready_rs_pod_names once RS bug is resolved (RED-51857) # pylint: disable=W0511\n for rs_pod_name in rs_pod_names:\n pod_log_dir = os.path.join(rs_pod_logs_dir, rs_pod_name)\n make_dir(pod_log_dir)\n cmd = \"cd \\\"{}\\\" && {} -n {} cp {}:{} ./ -c {}\".format(pod_log_dir,\n k8s_cli,\n namespace,\n rs_pod_name,\n RS_LOG_FOLDER_PATH,\n RLEC_CONTAINER_NAME)\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Failed to copy rs logs from pod \"\n \"to output directory, output:%s\", out)\n\n else:\n logger.info(\"Namespace '%s': \"\n \"Collected rs logs from pod marked as not ready, pod name: %s\", namespace, rs_pod_name)\n\n pod_config_dir = os.path.join(pod_log_dir, \"config\")\n make_dir(pod_config_dir)\n cmd = \"cd \\\"{}\\\" && {} -n {} cp {}:{} ./ -c {}\".format(pod_config_dir,\n k8s_cli,\n namespace,\n rs_pod_name,\n \"/opt/redislabs/config\",\n RLEC_CONTAINER_NAME)\n return_code, out = run_shell_command(cmd)\n if return_code:\n logger.warning(\"Failed to copy rs config from pod \"\n \"to output directory, output:%s\", out)\n\n else:\n logger.info(\"Collected rs config from pod marked as not ready, pod name: %s\", rs_pod_name)", "def main(arguments):\n auth = (arguments['username'], arguments['token'])\n data_collector = DataCollector(arguments['repo name'],\n arguments['start date'],\n arguments['end date'], auth,\n arguments['all'], arguments['page'])\n data_collector.collect_signals()", "def main():\n custom_logger=Custom_log(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)\n custom_logger.logger.info(\"log this\")\n custom_logger.logger.debug(\"this is debbuging message\")\n custom_logger.logger.error(\"oops something bad happened\")\n custom_logger.logger.critical(\"this will break\")\n custom_logger2=Custom_log(logger_name=\"custom_logger2\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.DEBUG,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=True,file_path=\"logs.log\",file_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_stream_level=logging.INFO)\n custom_logger2.logger.info(\"first log\")\n #custom_logger.print_all(logger_name=\"custom_name\",logger_level=logging.DEBUG,console_log=True,console_stream_level=logging.INFO,console_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',file_log=False)", "def collect_stats(xcnode, cmds):\n output = ''\n\n if not xcnode.client:\n print 'ssh session 
does not exist for {}'.format(xcnode.host)\n return output\n\n for cmd in cmds:\n stdin, stdout, stderr = xcnode.client.exec_command(cmd)\n out = stdout.read()\n outerr = stderr.read()\n xcnode.fd.write('{} run @ {}\\n'.format(cmd, datetime.now()))\n xcnode.fd.write('stdout:\\n============:\\n{}\\n'.format(out))\n if outerr:\n xcnode.fd.write('stderr\\n===========:\\n{}\\n'.format(outerr))\n output += out + '\\n'\n output += outerr + '\\n'\n xcnode.fd.flush()\n\n return output", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def log_command(command: List[str]) -> None:\n logger.debug(\"Command: \" + \" \".join(command))", "def get_critical_logs_command(args: dict, client: Client) -> Tuple[str, Dict[str, List[dict]], List[Dict[str, Any]]]:\n logs_amount = args.get('limit')\n query_start_time, query_end_time = query_timestamp(args)\n query = 'SELECT * FROM `firewall.threat` WHERE severity = \"Critical\" ' # guardrails-disable-line\n query += f'AND time_generated BETWEEN TIMESTAMP(\"{query_start_time}\") AND ' \\\n f'TIMESTAMP(\"{query_end_time}\") LIMIT {logs_amount}'\n\n records, raw_results = client.query_loggings(query)\n\n transformed_results = [threat_context_transformer(record) for record in records]\n\n human_readable = tableToMarkdown('Logs threat table', transformed_results, removeNull=True)\n ec = {\n 'CDL.Logging.Threat': transformed_results\n }\n return human_readable, ec, raw_results", "def consume(docker_client, redis_client):\n print 'Start consuming events from %s' % docker_client.base_url\n since = redis_client.get('castor:last_event')\n for event in docker_client.events(decode=True, since=since):\n for hook in settings.HOOKS:\n tasks.dispatch_event.delay(event, hook)\n redis_client.set('castor:last_event', event['time'])", "def run_and_log_output(cmd_string):\n logging.info('Running %s', cmd_string)\n c = iterpipes.cmd(cmd_string)\n out = iterpipes.run(c)\n for line in out:\n logging.info(line)", "def log(msg):\n\n print('datastore: %s' % msg)", "def main(\n src: str,\n dest: str,\n logfile: str,\n geo: Tuple[float, float] = None,\n alt: float = None,\n tags: Iterable[str] = None,\n) -> None:\n if not (os.path.exists(src)):\n raise FileNotFoundError(f'source path \"{src}\" not found')\n if not (os.path.exists(dest)):\n raise FileNotFoundError(f'destination path \"{dest}\" not found')\n if not (os.path.exists(logfile)):\n raise FileNotFoundError(f'json log file \"{logfile}\" not found')\n\n with open(logfile) as fp:\n for line in fp:\n try:\n logline = json.loads(line.strip())\n img_date = datetime.datetime.fromisoformat(logline[\"date\"])\n print(logline[\"date\"])\n process_image(\n src=os.path.join(src, logline[\"outfile\"]),\n dest=os.path.join(dest, logline[\"outfile\"]),\n desc=logline[\"description\"],\n timestamp=img_date,\n geo=geo,\n altitude=alt,\n tags=tags,\n )\n break\n except JSONDecodeError:\n pass", "def update_container(self):\n self.logger.info(\" > Update {} sql container\".format(self.service))\n\n for dump_filename in sorted(self.dump_files.keys()):\n file_path = self.dump_path.format(dump_filename)\n copy_command = \"docker cp {} {}:/docker-entrypoint-initdb.d/\".format(file_path, self.mysql_container)\n self.shell(copy_command, message=\" 
copy {}\".format(dump_filename))", "def main():\n # handle command line argument\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config',\n metavar='CONFIG_FILE',\n help='dataserver config file',\n default='./conf/ds.cfg')\n args = parser.parse_args()\n config_file = args.config\n\n # get config options\n config = configparser.ConfigParser()\n config.read(config_file)\n\n # init logging\n logger = logging.getLogger() # get the 'root' logger\n level = getattr(logging, config.get('log', 'log_level'))\n logger.setLevel(level)\n log_name = config.get('log', 'log_name')\n log_max_bytes = config.getint('log', 'log_max_bytes')\n log_file_num = config.getint('log', 'log_file_num')\n handler = logging.handlers.RotatingFileHandler(log_name,\n maxBytes=log_max_bytes,\n backupCount=log_file_num)\n log_format = logging.Formatter('%(levelname)-8s[%(asctime)s.%(msecs)d]'\n '<%(module)s> %(funcName)s:%(lineno)d:'\n ' %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n handler.setFormatter(log_format)\n logger.addHandler(handler)\n\n # start server\n ds_ = DSServer(config)\n ds_.start()", "def log_setup():\n logger = logging.getLogger('diskover')\n logger_warn = logging.getLogger('diskover_warn')\n eslogger = logging.getLogger('elasticsearch')\n diskover_eslogger = logging.getLogger('diskover_elasticsearch')\n loglevel = config['logLevel'].get()\n if options.debug:\n loglevel = 'DEBUG'\n if loglevel == 'DEBUG':\n loglevel = logging.DEBUG\n elif loglevel == 'INFO':\n loglevel = logging.INFO\n else:\n loglevel = logging.WARN\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n if logtofile:\n # create log file name using top dir names and datestamp\n treedirsstr = ''\n if args:\n n = 1\n dirs = args[0:]\n x = len(dirs)\n for d in dirs:\n if d != '/':\n d = d.rstrip('/')\n treedirsstr += os.path.basename(d)\n if n < x:\n treedirsstr += '_'\n n += 1\n else:\n treedirsstr = os.path.basename(os.getcwd())\n logfiletime = datetime.now().isoformat()\n logname = 'diskover_' + treedirsstr + '_' + logfiletime + '.log'\n logfile = os.path.join(logdir, logname)\n handler_file = logging.FileHandler(logfile)\n handler_file.setFormatter(logging.Formatter(logformat))\n logger.setLevel(loglevel)\n logger.addHandler(handler_file)\n # console logging\n handler_con = logging.StreamHandler()\n handler_con.setFormatter(logging.Formatter(logformat))\n logger.addHandler(handler_con)\n # warnings log\n logname_warn = 'diskover_' + treedirsstr + '_' + logfiletime + '_warnings.log'\n logfile_warn = os.path.join(logdir, logname_warn)\n handler_warnfile = logging.FileHandler(logfile_warn)\n handler_warnfile.setFormatter(logging.Formatter(logformat))\n logger_warn.setLevel(logging.WARN)\n logger_warn.addHandler(handler_warnfile)\n # es logger\n eslogger.setLevel(logging.WARN)\n eslogger.addHandler(handler_file)\n eslogger.addHandler(handler_con)\n # diskover es logger\n diskover_eslogger.setLevel(loglevel)\n diskover_eslogger.addHandler(handler_file)\n diskover_eslogger.addHandler(handler_con)\n else:\n handler_file = None\n handler_warnfile = None\n handler_con = None\n logging.basicConfig(format=logformat, level=loglevel)\n eslogger.setLevel(logging.WARN)\n return logger, logger_warn, loglevel, logformat, \\\n handler_file, handler_warnfile, handler_con", "def stream_logs(dauth_directory: DauthDirectoryConnection) -> None:\n try:\n for line in dauth_directory.streams_logs():\n print(line.strip())\n except KeyboardInterrupt:\n print()\n pass", "def processEventLog(log):\n pass", "def run(ws):\n 
ws.docker_run()", "async def _record_logs(self, report):\n\t\tif report.action == Frame.Report.PARSE:\n\t\t\t# Collects the tests parsing log for further writing to Test_Parser.log\n\t\t\tif report.success:\n\t\t\t\tself._parse_logs[\"success\"] += [report.log]\n\t\t\telse:\n\t\t\t\tself._parse_logs[\"failure\"] += [report.log]\n\t\telif report.action == Frame.Report.EXECUTE:\n\t\t\t# Writes a test log and dump to the results directory\n\t\t\ttest_log = (\"EXECUTE STATUS: SUCCESS\\n\\n\" if report.success else \"EXECUTE STATUS: FAILURE\\n\\n\") + report.log\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, FileSystem.dump_to, \n\t\t\t\t self._result_directory_name + \"/Log/\" + report.test_name + \".log\", test_log)]):\n\t\t\t\tawait task\n\t\t\tfor task in as_completed([self._event_loop.run_in_executor(self._thread_executor, TestLogger._write_test_dump, \n\t\t\t\t self._result_directory_name + \"/Dump/\" + report.test_name + \".pcap\", report.dump)]):\n\t\t\t\tawait task", "def fetch_run_logs(id_, **kwargs):\n run = get_run_object(id_)\n check_run_permission(run, kwargs[\"token_info\"])\n query = \"ilyde-run-{}\".format(run.id)\n return query_elasticsearch(query)", "def do_stage(self, images):\n\n for i, image in enumerate(images):\n pass\n # logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)", "def _get_logs(self):\n logstart = self.LOGSTART%(self.session.uuid, self.session.run_counter)\n logend = self.LOGEND%(self.session.uuid, self.session.run_counter)\n log = self.container.logs().decode('UTF-8')\n while log.find(logstart) == -1 or log.find(logend) == -1:\n log = self.container.logs().decode('UTF-8')\n cleaned_log = self._get_cleaned_logs(log, logstart, logend)\n self.session.run_counter = self.session.run_counter + 1\n self.session.save()\n return cleaned_log", "def return_user_log_from_frr(dut,log_file_name):\n return st.config(dut,\"docker exec -it bgp cat /var/log/frr/%s\"%log_file_name)", "def _init_logging(self):\n # Setup logging variable\n self.log = logging.getLogger(\"collection-log\")\n self.log.setLevel(logging.INFO)\n self.formatter = logging.Formatter(\"%(asctime)s %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n\n # Log to stdout\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(logging.INFO)\n streamhandler.setFormatter(self.formatter)\n self.log.addHandler(streamhandler)", "def do_stats(cs, args):\n stats_info = cs.containers.stats(args.container)\n utils.print_dict(stats_info)", "def docker():\n try:\n client = docker_from_env(\n version=os.environ.get('DOCKER_API_VERSION', '1.24'))\n\n containers = []\n\n for container in client.containers.list():\n include_container = False\n if INTERESTING_CONTAINERS.search(container.name):\n include_container = True\n else:\n for tag in container.image.attrs.get('RepoTags', []):\n if INTERESTING_TAGS.match(tag):\n include_container = True\n break\n\n if not include_container:\n continue\n\n docker_metrics = {\n \"stats_type\": \"docker\",\n \"docker\": {\n \"id\": container.short_id,\n \"name\": container.name,\n \"status\": container.status,\n \"labels\": [\"%s=%s\" % (k, v)\n for k, v in container.labels.items()],\n \"tags\": container.image.attrs['RepoTags'],\n 'created': container.image.attrs['Created'],\n }\n }\n if 'version' in container.labels:\n docker_metrics['docker']['image_version'] = \\\n container.labels['version']\n containers.append(docker_metrics)\n\n except Exception as exc:\n logging.debug(\"Error gathering Docker info: %s\", exc)\n return 
[]\n\n return containers", "def run_collector(args):\n store = None\n\n if args.rdbs_store:\n store = get_rdbs_store(args)\n if store:\n log.info('Using RDBS Event Store')\n else:\n log.warning('Unable to set up the RDBS Event Store. Will fall back to using Naive Event Store.')\n if not store:\n store = get_naive_store(args)\n log.info('Using Naive Event Store')\n\n persistent = not args.live_mode\n collector = Collector(store=store, hostname=args.server_host, port=args.port, persistent=persistent)\n\n def stop_collector(sig, frame):\n \"\"\"Signal handler that stops the collector.\n \"\"\"\n log.info('Collector is shutting down.')\n collector.stop()\n\n signal.signal(signal.SIGHUP, stop_collector)\n signal.signal(signal.SIGINT, stop_collector)\n signal.signal(signal.SIGTERM, stop_collector)\n\n collector.run()", "def Logs():\n # time.sleep(100)\n params = request.get_json()\n hostname = params.get('vaultip', '164.99.91.35')\n password = params.get('boxpass', 'novell')\n username = params.get('boxusername', 'root')\n port = 22\n logType = 'download'\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=hostname, username=username, password=password, port=port)\n\n try:\n print('configure')\n logType= 'configure'\n sftp = client.open_sftp()\n stdin = sftp.open('/var/opt/netiq/idm/log/idmconfigure.log','r')\n # sftp.close();\n except Exception:\n try:\n print('install')\n logType= 'install'\n # sftp = client.open_sftp()\n stdin = sftp.open('/var/opt/netiq/idm/log/idminstall.log','r')\n except Exception:\n #sftp.close()\n print('download')\n logType= 'download'\n try:\n stdin = sftp.open('/tmp/download.log','r')\n except Exception:\n sftp.close()\n return jsonify('no file'),200\n #sftp.close()\n log = stdin.readlines()\n data={'type':logType,'log':log}\n return jsonify(data),200", "def dump_log(ip, verbose=False):\n # Force ip to str (if eg. 
ip == ipaddress class)\n ip = str(ip)\n\n # Getting Auth Key\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, TCP_PORT_AUTH))\n s.send(GET_AUTH_KEY)\n data = s.recv(BUFFER_SIZE)\n s.close()\n\n auth_key = data[16:32]\n if verbose:\n print(\"Received data: {} - KEY: {}\".format(data, auth_key), file=sys.stderr)\n\n\n # Asking for logs\n ask_logs_auth = ASK_LOGS_AUTH_NOKEY + auth_key\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, TCP_PORT_LOGS))\n\n s.send(ask_logs_auth)\n # Waiting for response and discard it, it's only a confirmation\n data = s.recv(BUFFER_AUTH_REPONSE)\n if verbose:\n print(\"Response: \", data, file=sys.stderr)\n\n # Socket Connection will time out after 10 seconds of inactivity (The Cube finished sending the logs)\n s.settimeout(10)\n s.send(ASK_LOGS_FLOW)\n # Waiting for response\n ismart_log = b\"\"\n i = 0\n while 1:\n if i % 4 == 0 and verbose:\n print(\"Receiving logs...\", file=sys.stderr)\n try:\n data = s.recv(BUFFER_FLOW_REPONSE)\n ismart_log += data\n except socket.timeout:\n if verbose:\n print(\"Connection timed out after 10 seconds of inactivity from the cube\", file=sys.stderr)\n break\n i += 1\n\n s.close()\n return ismart_log", "def _start_collect_stats(sc):\n\n arg_dict = {}\n ev = sc.new_event(id=lb_const.EVENT_COLLECT_STATS_V2, data=arg_dict)\n sc.post_event(ev)", "def setup_logging():\n log_format = '%(asctime)-15s %(levelname)s: %(message)s'\n logging.basicConfig(format=log_format, level=logging.DEBUG,\n filename='counting_consumer.out')", "def get_logs(ctx, num):\n app = ctx.obj['app']\n api_client = ctx.obj['api_client']\n colors = dict()\n logs = api_client.get_application_logs(app, lines=num)\n for log in reversed(logs):\n if log['process'] not in colors:\n index = len(colors)\n colors[log['process']] = _available_colors[index % len(_available_colors)]\n for log in logs:\n color = colors[log['process']]\n header = click.style('{timestamp} {app_name}[{process}]:'.format(\n timestamp=log['timestamp'],\n app_name=log['app'],\n process=log['process'],\n ), fg=color)\n click.echo('{header} {message}'.format(header=header, message=log['message']))", "def __init__(self):\n super(GithubCollector, self).__init__()\n config_file = ('collectors.cfg')\n log_file = self.config['Github']['log_file']\n logging.config.fileConfig(config_file,\n defaults={'GithubCollector': log_file}\n )\n self.logger = logging.getLogger('GithubCollector')\n self.elasticsearch = Elasticsearch(['localhost:9200'])\n self.redis = redis.Redis(host='127.0.0.1', port=6379, password='')\n self.timestamp = datetime.date.today().isoformat()", "async def start(self, collection, hostmap):\n records = []\n tmpl = Template(\"--host-record=$name,$ip\")\n for name, ips in hostmap.items():\n for ip in ips:\n records.append(tmpl.substitute(name=name, ip=ip))\n\n cmd = \"--user=root \" + \" \".join(records)\n ports = {(53, \"udp\"): 53}\n\n results = await self.docker.run_containers(\n collection, self.info.name, cmd, ports=ports, local_dns=False)\n\n # Add the dns info to the instances\n for inst, response in zip(collection.instances, results):\n state = inst.state\n if hasattr(state, \"dns_server\"):\n continue\n dns_ip = response[\"NetworkSettings\"][\"IPAddress\"]\n state.dns_server = dns_ip" ]
[ "0.73023754", "0.66543895", "0.6469975", "0.6311792", "0.6195516", "0.619132", "0.6190507", "0.618653", "0.6127707", "0.6075922", "0.60741675", "0.60354525", "0.60254896", "0.59426993", "0.59426993", "0.59009355", "0.58668494", "0.58238095", "0.58153385", "0.57909554", "0.5778273", "0.57710785", "0.5757475", "0.57255214", "0.57233375", "0.5722884", "0.572274", "0.5717944", "0.57012284", "0.5699877", "0.56926674", "0.56883854", "0.5650068", "0.5645316", "0.55597913", "0.5484235", "0.54343253", "0.5425264", "0.5424061", "0.5413615", "0.53654444", "0.53582734", "0.5339465", "0.5316598", "0.53137034", "0.53089005", "0.5301193", "0.5283421", "0.52677995", "0.5266697", "0.5253492", "0.52467984", "0.5239448", "0.52056044", "0.5204825", "0.51946145", "0.51913893", "0.51877767", "0.51716936", "0.51669097", "0.5163208", "0.5159378", "0.5156319", "0.51517844", "0.514178", "0.5134755", "0.5132796", "0.51309717", "0.5117047", "0.51066774", "0.5106055", "0.50979865", "0.50762516", "0.50704473", "0.5062349", "0.50568426", "0.5048418", "0.50469553", "0.5038213", "0.5034547", "0.50174636", "0.50165063", "0.5009246", "0.50091815", "0.5008125", "0.500279", "0.49974343", "0.49966723", "0.49839672", "0.4975071", "0.49731836", "0.49709922", "0.49687877", "0.495262", "0.4951578", "0.4951266", "0.49266827", "0.4924224", "0.49240112", "0.4920952" ]
0.6732043
1
Load in all images from a folder
def load_svhn_images(folder_path):
    images = []
    for file in os.listdir(folder_path):
        if file.endswith(".png"):
            image = Image.open(file)
            image.load()
            # Load image data as 1 dimensional array
            # We're using float32 to save on memory space
            feature = np.array(image, dtype=np.float32)
            images.append(feature)
    return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def load_from_folder(path):\n images = []\n files = os.listdir(path)\n files.sort()\n for file in tqdm(files):\n images.append(io.imread(path + file))\n return images", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def load_images(path):\n images = []\n images_names = []\n \n for file_name in os.listdir(path):\n image_name = file_name\n images_names.append(image_name)\n images_names = sorted(images_names) #use sort to insure linux file sys behaves\n print(images_names) #check for proper order\n\n for file_name in images_names:\n image = pygame.image.load(path + os.sep + file_name).convert()\n 
images.append(image)\n return images", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def load_images(folder_path):\n images = []\n # first make image paths list\n # cv2 can take in wildcard args if using glob\n image_paths = glob.glob(folder_path + \"/*\")\n for path in image_paths:\n images.append(cv2.imread(path))\n return (images, image_paths)", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)", "def loadImages(loadPath):\n img_array = []\n for filename in glob.glob(loadPath):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n img_array.append(img)\n\n return img_array", "def load_images(self):\n images_list = [os.path.join(self.root, image['file_name'])\n for image in self.data['images']]\n\n if self.shuffle:\n random.shuffle(images_list)\n images_list = images_list[:self.max_samples] if self.max_samples is not None and self.max_samples <= len(\n images_list) else images_list\n\n return images_list", "def get_images_of_folder(folder):\n\n Settings.dev_print(\"getting images of folder: {}\".format(folder.get_title()))\n if not folder: return []\n imgs = []\n files = []\n valid_images = [\".jpg\",\".gif\",\".png\",\".tga\",\".jpeg\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_images:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"image path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def load_images(image_name_to_label):\n images = []\n labels = []\n\n image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)\n\n # Remove directories\n image_names.remove(\"COVID-19\")\n image_names.remove(\"Normal\")\n image_names.remove(\"ViralPneumonia\")\n\n # Load images from specific image directories (COVID-19, normal, viral pneumonia)\n def 
load_directory(directory):\n notifier.send(\" Loading from directory: \" + directory + \"...\")\n directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory\n directory_image_names = os.listdir(directory_path)\n for i, image_name in enumerate(directory_image_names):\n base_image_name = get_base_image_name(image_name)\n query_name = directory + \"/\" + base_image_name\n query_name = query_name.lower().replace(\" \", \"\")\n if query_name in image_name_to_label:\n print(f\" {i / len(directory_image_names) * 100}% - [{image_name}]\")\n image_path = directory_path + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[query_name])\n load_directory(\"COVID-19\")\n load_directory(\"Normal\")\n load_directory(\"ViralPneumonia\")\n\n # Load images from default directory\n if LOAD_ALL_IMAGES:\n notifier.send(\" Loading from directory: default...\")\n for i, image_name in enumerate(image_names):\n base_image_name = get_base_image_name(image_name)\n if base_image_name in image_name_to_label:\n print(f\" {i / len(image_names) * 100}% - [{image_name}]\")\n image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[base_image_name])\n\n return images, labels", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def _load_images(paths):\n assert isinstance(paths, list)\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n\n # allocate memory\n images = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 3],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)):\n img = sio.imread(paths[i])\n\n # resize images\n img = sresize(img, (FLAGS.target_height, FLAGS.target_width, 3),\n mode='constant', preserve_range=True)\n\n # store images\n images[i] = img.astype(np.float32)\n pbar.update(i)\n\n # mean removal\n images -= [_R_MEAN, _G_MEAN, _B_MEAN]\n return images", "def _getImagesFromDirectory(self, directoryPath):\n files = [f for f in listdir(directoryPath)\n if isfile(join(directoryPath, f))]\n for filePath in files:\n self._imageDictionary[filePath] = image.load(\n self._formatPath(directoryPath, filePath))", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def load_img(file_list, dir_path):\n data = []\n for file in file_list:\n img = plt.imread(dir_path + file)\n # Convert RGB image to grayscale\n if len(img.shape) == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Resize image to desired size\n img = cv2.resize(img, (64, 64))\n # Store processed image to list\n data.append(img)\n return np.array(data)", "def loadImagesFromDirectory(self, directoryPath):\n if isdir(directoryPath):\n self._getImagesFromDirectory(directoryPath)\n else:\n print(directoryPath + \" does not exists\")", "def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = 
np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def readImages(imgFolder='img/'):\n #Each image in images is a numpy array of shape 192x168(x1) (heightxwidth)\n #images datatype is a regular numpy list\n filenames = os.listdir(imgFolder)\n if imgFolder == 'img/':\n images = [imageio.imread('img/'+fn+'/image0.jpg')[::,::].astype(np.float32)/255. for fn in filenames]#glob.glob(imgFolder+'*.jpg')]\n else:\n images = [imageio.imread(imgFolder+fn)[::,::].astype(np.float32)/255. for fn in filenames]\n return images", "def getimagelist(folder):\n imagefolder = Path(folder) \n imagelist = imagefolder.glob(\"**/*.png\") \n return list(imagelist)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def _locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images", "def load_images_from_folder(folder, size = (224, 224), start = 0, end = 100000):\n # Empty list to store images loaded from storage\n images = []\n\n # Loop over the files in the folder from start to end\n for filename in os.listdir(folder)[start:end]:\n\n # Read image from the path\n image = cv2.imread(os.path.join(folder,filename))\n\n # Check if the read was successfull\n if image is not None:\n # Resize the image to the target size\n image = cv2.resize(image, dsize = size)\n\n # Convert image from standard cv2 BGR color space to tradition RGB color space\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Append image to the list of images\n images.append(image)\n\n # Return images as numpy array\n return np.array(images)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n existing_dirs = [os.path.basename(dir) for dir in os.listdir(FLAGS.output_dir)]\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.JPEG')):\n with tf.gfile.Open(filepath, 'rb') as f:\n image = np.array(Image.open(f).resize([FLAGS.image_height, FLAGS.image_width]).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n if os.path.basename(os.path.normpath(input_dir))=='*':\n head, tail = os.path.split(filepath)\n dirname=os.path.basename(head)\n if dirname in existing_dirs:\n continue\n filename = os.path.join(dirname, tail)\n else:\n filename = os.path.basename(filepath)\n filenames.append(filename)\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_paths(dir):\n IMG_EXTENSIONS = ['.jpg', 
'.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.bmp', '.BMP']\n image_paths = []\n\n # traverse directory to obtain only paths to images\n for dir_name, _, paths in sorted(os.walk(os.path.expanduser(dir))):\n for path in paths:\n if any(path.endswith(extensions) for extensions in IMG_EXTENSIONS):\n image_paths.append(os.path.expanduser(dir_name + '/' + path))\n\n return image_paths", "def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName + files[i]) for i in range(n)]\n\n return imgs", "def _fetch_all_images(self, path) -> List[str]:\n files_all = []\n\n for ext in self.exts:\n files_all.extend(glob.glob(join(path, ext)))\n\n return files_all", "def load_images(image_filename):\n\n # Write code here to loop over image data and populate DB.", "def load_background_image_files(self, folder_path):\n if self.is_binary:\n print(':WARNING: background image files are not loaded for binary '\n 'generation mode.')\n else:\n self.augment.add_background_image_noises(folder_path)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 1.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def read_imgs(path):\n dirs = os.listdir(path)\n imgs = []\n for fn in dirs:\n img_path = path + '/' + fn\n img = cv2.imread(img_path, 1)\n img = np.float32(cv2.resize(img, (224, 224))) / 255\n imgs.append(img)\n imgs = np.array(imgs)\n return imgs", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_sample_images():\n # Try to import imread from scipy. 
We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n ROOT_Dir = os.getcwd()\n module_path = os.path.join(ROOT_Dir, \"images\")\n with open(os.path.join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [os.path.join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)", "def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_all_frames(self, path, convert_alpha=False):\n to_load = []\n for name in listdir(self.get_path(path)):\n as_list = name.split('.')\n if len(as_list) <= 2 and ImageLoader.is_decimal(as_list[0]) and \\\n isfile(self.get_path(path + [name])):\n to_load.append(name)\n to_load.sort(key=lambda name: name.split('.')[0])\n return [self.load_image(path + [x], convert_alpha)\n for x in to_load]", "def load_images_from_directory(input_dir, batch_shape):\n def input_filenames(input_dir):\n all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n all_files.sort()\n return all_files\n\n\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n\n for filepath in input_filenames(input_dir):\n with tf.gfile.Open(filepath, mode='rb') as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n\n # This is a partial batch left over at end.\n # Note that images will still have the proper size.\n if idx > 0:\n yield filenames, images", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += 
[image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_image(path):\n imagelist = []\n\n for image_file in os.listdir(path):\n image_path = os.path.join(path, image_file)\n image = Image.open(image_path).resize([224, 224])\n image = np.array(image).astype(np.float) / 128 - 1\n imagelist.append(image)\n\n return np.array(imagelist)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n if(FLAGS.checkpoint_file_name==\"vgg_16.ckpt\")or(FLAGS.checkpoint_file_name==\"vgg_19.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_50.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_101.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_152.ckpt\"):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float)\n images[idx, :, :, :] = image\n else:\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' 
+ ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def read_images(folder):\n distinct_frames = DistinctFrames()\n\n for file in sorted(sorted(os.listdir(folder)),\n key=len): # sorting files on basis of 1) length and 2) numerical order\n '''\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.pkl\n 2. image22.pkl\n 3. image21.pkl\n firstly sort them to image100.pkl,image21.pkl,image22.pkl then according to length to image21.pkl,image22.pkl,image100.pkl\n '''\n try:\n img_obj = load_from_memory(file, folder)\n time_stamp = img_obj.get_time()\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for debug purpose\n except:\n # exception will occur for files like .DS_Store and jpg directory\n continue\n\n if distinct_frames.no_of_frames() != 0:\n distinct_frames.calculate_time()\n\n return distinct_frames", "def load_images(self, filename):\n\n self.images = self.load(filename)\n self.length = len(self.images)\n self.create_teacher()", "def load_images(file):\n\timage_list = [] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets", "def load(self, dirname):\n loaded_filenames = set()\n ini_filename = os.path.join(dirname, \"xpresser.ini\")\n if os.path.exists(ini_filename):\n config = ConfigParser.ConfigParser()\n config.read(ini_filename)\n for section_name in config.sections():\n if section_name.startswith(\"image \"):\n image_name = section_name.split(None, 1)[1]\n try:\n image_filename = config.get(section_name, \"filename\")\n except 
ConfigParser.NoOptionError:\n raise ImageDirError(\"Image %s missing filename option\"\n % image_name)\n image_filename = os.path.join(dirname, image_filename)\n if not os.path.exists(image_filename):\n raise ImageDirError(\"Image %s file not found: %s\" %\n (image_name, image_filename))\n try:\n image_similarity = config.getfloat(section_name,\n \"similarity\")\n except ConfigParser.NoOptionError:\n image_similarity = None\n except ValueError:\n value = config.get(section_name, \"similarity\")\n raise ImageDirError(\"Image %s has bad similarity: %s\"\n % (image_name, value))\n \n try:\n value = config.get(section_name, \"focus_delta\")\n match = CLICK_POSITION_RE.match(value)\n if not match:\n raise ImageDirError(\"Image %s has invalid click \"\n \"position: %s\" %\n (image_name, value))\n image_focus_delta = (int(match.group(\"x\")),\n int(match.group(\"y\")))\n except ConfigParser.NoOptionError:\n image_focus_delta = None\n image = Image(name=image_name,\n filename=image_filename,\n similarity=image_similarity,\n focus_delta=image_focus_delta)\n self._images[image_name] = image\n loaded_filenames.add(image_filename)\n\n # Load any other images implicitly with the default arguments.\n for basename in os.listdir(dirname):\n filename = os.path.join(dirname, basename)\n if filename not in loaded_filenames:\n ftype, fencoding = mimetypes.guess_type(filename)\n if ftype and ftype.startswith(\"image/\"):\n image_name = os.path.splitext(basename)[0]\n self._images[image_name] = Image(name=image_name,\n filename=filename)", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def load_sprites(dir=\"/home/robin/workspace/python/ipt/chess/sprites\"):\n arr = []\n chdir(dir)\n for i in range(12):\n img = mimg.imread(\"sprite_\"+\"{:0>2d}\".format(i)+\".png\")\n arr.append(img)\n return arr", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def find_all_images_in_folder(path_to_folder):\n import os, os.path\n \n imgs = [] \n valid_images = [\".jpg\",\".gif\",\".png\",\".jpeg\"]\n for f in os.listdir(path_to_folder):\n pre,ext = os.path.splitext(f) \n if(ext.lower() in valid_images) and not (pre.endswith(\"thumbnail\")):\n #imgs.append( [os.path.join(path_to_folder,pre),ext] )\n imgs.append( [pre ,ext] )\n return imgs", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def load_images(test_data_dir, image_size = (300, 300)):\n # loop over the input images\n images_data = []\n labels = []\n imagePaths = sorted(list(paths.list_images(test_data_dir)))\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, image_size)\n 
image = img_to_array(image)\n images_data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n return images_data, sorted(labels)", "def get_images(path, ext=\".jpg\"):\n return get_files(path, ext)", "def load_images(input_dir, batch_shape=[2000,299,299,3]):\n \n filenames = []\n idx = 0\n filepaths=tf.gfile.Glob(os.path.join('./', '*.png'))\n print(len(filepaths))\n print(filepaths)\n batch_shape[0]=len(filepaths)\n batch_size = batch_shape[0]\n print(batch_shape)\n print(\"ZZZ\")\n images = np.zeros(batch_shape, dtype=np.float32)\n \n for filepath in filepaths:\n# with tf.gfile.Open(filepath) as f:\n# image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n image = np.array(scipy.misc.imresize(scipy.misc.imread(filepath),(299,299)),dtype=np.float32)/255\n \n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image -0.5 #* 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n return filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n return filenames, images", "def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets", "def load_images(files, open_fn=None):\n if open_fn is None:\n import cv2\n open_fn = cv2.imread\n images = list()\n for _file in files:\n images.append(np.asarray(open_fn(_file)))\n return images", "def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte", "def load_img_names(path):\n images = [f for f in listdir(path) if isfile(join(path, f))]\n df = pd.DataFrame(images)\n df.columns = [\"img\"]\n df = df.sort_values(\"img\")\n return df[\"img\"].values", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or 
f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs", "def list_images(path=['.']):\n for image_dir in set(path):\n if not os.path.isdir(image_dir):\n continue\n for filename in os.listdir(image_dir):\n bname, ext = os.path.splitext(filename)\n if ext.lower() not in VALID_IMAGE_EXTS:\n continue\n\n filepath = os.path.join(image_dir, filename)\n yield strutils.decode(filepath)", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def populate_image_lists():\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_a.append(path.path)\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_b.append(path.path)", "def get_data_images(path):\n\n return sorted(\n [os.path.join(root, filename) for root, dirnames, filenames in os.walk(path) for filename in\n filenames if\n filename.endswith('.jpg') and os.path.getsize(os.path.join(root, filename)) > 0]\n )", "def read_image():\n images = []\n for hand in os.listdir('images'):\n img = cv2.imread(os.path.join('images', hand))\n if img is not None:\n images.append(img)\n return images", "def load_images(images_path, as_array=True):\n list_names = []\n list_img = []\n\n path_list = glob.glob(images_path + '/*', recursive=False)\n path_list.sort()\n\n for image_name in path_list:\n # ************** LINUX ****************\n name = image_name.split('/')[-1]\n # ************* WINDOWS **************\n name = name.split('\\\\')[-1]\n list_names.append(name)\n\n for image_name in path_list:\n # ******************* WINDOWS & LINUX ***************************\n image = cv2.imdecode(np.fromfile(image_name, np.uint8),\n cv2.IMREAD_UNCHANGED)\n # ******************* LINUX ******************************\n # imagen = cv2.imread(image_name)\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n list_img.append(image_rgb)\n\n if as_array is True:\n list_img = np.array(list_img)\n\n return(list_img, list_names)", "def readImages(image_dir):\n images = {}\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' 
+ ext) for ext in extensions]\n image_files = sorted(reduce(list.__add__, map(glob, search_paths)))\n for f in image_files:\n images[f[f.rfind(\"/\") + 1:f.rfind(\".\")]] = cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR)\n\n return images", "def get_image_list(folder):\n image_list = []\n for each_file in os.listdir(folder):\n filename, ext = os.path.splitext(each_file)\n if ext == '.gif':\n image_list.append(each_file)\n return image_list", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def getimgs():", "def load_images(filelist):\n # pixel value range 0-255\n if not isinstance(filelist, list):\n im = Image.open(filelist).convert('L')\n return np.array(im).reshape(1, im.size[1], im.size[0], 1)\n data = []\n for file in filelist:\n im = Image.open(file).convert('L')\n data.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))\n return data", "def process_images(image_folder: Path) -> List[Dict]:\n images = []\n files = image_folder.glob(\"*.jpg\")\n\n for file_path in files:\n file_name = file_path.name\n file_id = file_name.split(\".jpg\")[0]\n file_id = file_id.split(\"in\")[-1]\n file_id = int(file_id)\n file_id = f\"{file_path.parent.parent.name}_{str(file_id)}\"\n\n width, height = imagesize.get(str(file_path))\n\n image_data = {\"id\": file_id,\n \"width\": width,\n \"height\": height,\n \"filename\": str(file_path)}\n images.append(image_data)\n\n return images", "def get_images(path):\n\n # Cast path to absolute path\n absolute = abspath(path)\n\n img_lis = [] # Holds images in a folder\n file_lis = get_files(absolute)\n\n # Now get the images within file list\n img_lis = [f for f in file_lis if is_filetype(f)]\n\n return img_lis", "def get_images_from_dir(src_folder, n_images=None, shuffle=False):\n\tvalid_extensions = set([\"bmp\", \"jpeg\", \"jpg\", \"png\", \"tif\", \"tiff\"])\n\tsrc_contents = os.walk(src_folder)\n\tdirpath, _, fnames = src_contents.next()\n\timg_dir = os.path.split(dirpath)[-1]\n\timg_files = [os.path.join(dirpath, name) for name in fnames]\n\tif shuffle:\n\t\trandom.shuffle(img_files)\n\tif n_images:\n\t\timg_files = img_files[:n_images]\n\timages = [cv2.imread(name, cv2.IMREAD_GRAYSCALE) for name in \n\t\t\t img_files[:n_images] if os.path.splitext(name)[-1][1:].lower() \n\t\t\t in valid_extensions]\n\tif shuffle:\n\t\trandom.shuffle(images)\n\treturn images", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. 
This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def get_imgs(path):\n imlist = {}\n for each in glob(path + \"*\"):\n word = each.split(\"/\")[-1]\n imlist[word] = []\n for imagefile in glob(path+word+\"/*\"):\n im = cv2.imread(imagefile, 0)\n imlist[word].append(im)\n\n return imlist", "def load_images(self, target = \"standard\", path=OMNIGLOT_DATAPATH):\n X = []\n Y = []\n folderName = {}\n if target == \"standard\":\n trainFolders = [\"images_background\"]\n testFolders = [\"images_evaluation\"]\n elif target == \"minimal\":\n trainFolders = [\"images_background_small1\", \"images_background_small2\"]\n testFolders = [\"images_evaluation\"]\n \n if self.train:\n for trainFolder in trainFolders:\n folderPath = os.path.join(path, trainFolder)\n imgAllCount = 0 # this is counted for the whole images in all alphabet\n chaAllCount = 0 # this is counted for the whole characters in all alphabet\n\n for alphabet in sorted(os.listdir(folderPath)):\n alphabetPath = os.path.join(folderPath, alphabet)\n folderName[alphabet] = {'totalChar': 0, 'charIndex': [], 'totalImg': 0, 'imgIndex': []}\n \n imgAlphabetCount = 0 # this is counted for the number of images in this alphabet\n chaAlphabetCount = 0 # this is counted for the number of character in this alphabet\n\n folderName[alphabet]['charIndex'].append(chaAllCount)\n folderName[alphabet]['imgIndex'].append(imgAllCount)\n \n for letter in sorted(os.listdir(alphabetPath)):\n letterPath = os.path.join(alphabetPath, letter)\n \n for letterImage in os.listdir(letterPath):\n imagePath = os.path.join(letterPath, letterImage)\n image = mpimg.imread(imagePath)\n X.append(image)\n Y.append(chaAllCount)\n \n imgAlphabetCount += 1\n imgAllCount += 1\n\n chaAlphabetCount += 1\n chaAllCount += 1\n \n folderName[alphabet]['totalChar'] = chaAlphabetCount\n folderName[alphabet]['totalImg'] = imgAlphabetCount\n folderName[alphabet]['charIndex'].append(chaAllCount-1)\n folderName[alphabet]['imgIndex'].append(imgAllCount-1)\n \n X = np.stack(X) \n X = X.reshape(-1, IMAGES_PER_CHARACTER, X.shape[1], X.shape[2])\n return X, np.stack(Y), folderName", "def load_images(path, p=1, feature=None, transform=None):\n\n images = os.listdir(path)\n images = random.sample(images, math.ceil(len(images) * p))\n\n loaded = [\n load_image(\n os.path.join(path, img),\n feature=feature, transform=transform)\n for img in images]\n\n return np.array([x for x in loaded if x is not None])", "def get_imlist(path):\n\treturn [os.path.join( path, f) for f in os.listdir(path) if f.endswith('.jpg')]", "def import_images(site):\n image_bank = site['imagens']\n # look inside \"images\" folder and import all files\n path = os.path.dirname(os.path.abspath(__file__)) + '/browser/images/'\n logger.info(u'Importando imagens')\n for name in os.listdir(path):\n with open(path + name) as f:\n image = StringIO(f.read())\n img_name = name.split('.')[0]\n title = img_name.replace('-', ' ').title()\n api.content.create(\n image_bank,\n type = 'Image',\n id = name,\n title = title,\n description = u'Esta imagem é referenciada nos conteúdos do portal.',\n image = image,\n creators = CREATORS,\n )\n logger.debug(u' {0} importada'.format(name))", "def 
read_from_folder(args, n_values=50):\n images = []\n img_id = 0\n basedir = str(args['input_train'])\n class_dirs = os.listdir(basedir)\n # load images from base directory\n for class_dir in class_dirs:\n image_files = glob.glob(os.path.join(basedir, class_dir, \"*\"))\n\n # test case\n if args['test']:\n image_files = image_files[0:n_values]\n\n for image_file in image_files:\n img = image.OCRImage(pil_image=Image.open(image_file),\n img_id=img_id,\n img_class=class_dir,\n img_hex=image_file[:-4][-4:])\n images.append(img)\n img_id += 1\n\n return images" ]
[ "0.8685325", "0.8239608", "0.81866777", "0.7909743", "0.78601134", "0.776808", "0.7751472", "0.773561", "0.76567906", "0.75322235", "0.75300574", "0.74120563", "0.7395588", "0.7363391", "0.7355561", "0.72881615", "0.72234184", "0.71718514", "0.71616906", "0.71594054", "0.715789", "0.7154474", "0.7142782", "0.71236473", "0.710087", "0.70926076", "0.70539504", "0.6998348", "0.699098", "0.69827235", "0.6948379", "0.6946889", "0.690835", "0.6905089", "0.6901436", "0.6896508", "0.6893413", "0.6881445", "0.6881132", "0.68729115", "0.6867905", "0.68615234", "0.6851643", "0.6840283", "0.6831403", "0.68046695", "0.68040967", "0.68006223", "0.6797464", "0.6793892", "0.6777946", "0.6774075", "0.6768454", "0.67661536", "0.67599326", "0.6751163", "0.6747636", "0.6741192", "0.6740046", "0.6737542", "0.67261535", "0.6719316", "0.66941047", "0.66913956", "0.6682815", "0.6673824", "0.6671724", "0.66698664", "0.6661389", "0.66612256", "0.66499996", "0.6648911", "0.6647124", "0.66448385", "0.6641546", "0.66362", "0.6627593", "0.66258687", "0.6623937", "0.6615304", "0.6588019", "0.6570821", "0.65652657", "0.65632755", "0.6560987", "0.655067", "0.65477884", "0.6541096", "0.653195", "0.6522254", "0.6518842", "0.6507195", "0.65021735", "0.6501319", "0.64951944", "0.6484214", "0.6478986", "0.6477463", "0.64773613", "0.6476472" ]
0.6842482
43
Read in labels from a digitStruct.mat file to create a dict mapping image file names to their corresponding labels
def read_labels(digitstruct_file):
    # Map each image file name to the list of digit labels from its bounding boxes.
    labels = dict()
    for dsObj in tqdm(yieldNextDigitStruct(digitstruct_file), ncols=50):
        image_labels = []
        for bbox in dsObj.bboxList:
            image_labels.append(bbox.label)
        labels[dsObj.name] = image_labels
    return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def get_images_and_labels(tampered_path, authentic_path):\n tampered_dir = tampered_path\n authentic_dir = authentic_path\n images = {}\n for im in glob.glob(authentic_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 0\n for im in glob.glob(tampered_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 1\n return images", "def load_labels(path, kmer=True, rg=True, clip=True, rna=True, go=True):\n\n labels = dict()\n if go: labels[\"X_GO\"] = gzip.open(os.path.join(path,\n \"matrix_GeneOntology.tab.gz\")).readline().split(\"\\t\")\n if kmer: labels[\"X_KMER\"] = gzip.open(os.path.join(path,\n \"matrix_RNAkmers.tab.gz\")).readline().split(\"\\t\")\n if rg: labels[\"X_RG\"] = gzip.open(os.path.join(path,\n \"matrix_RegionType.tab.gz\")).readline().split(\"\\t\")\n if clip: labels[\"X_CLIP\"] = gzip.open(os.path.join(path,\n \"matrix_Cobinding.tab.gz\")).readline().split(\"\\t\")\n if rna: labels[\"X_RNA\"] = gzip.open(os.path.join(path,\n \"matrix_RNAfold.tab.gz\")).readline().split(\"\\t\")\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels 
= np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()", "def get_pet_labels(images_dir):\r\n \r\n # Creates a list of files in directory from pet images directory\r\n in_files = listdir(images_dir)\r\n \r\n # Process each of the files such that the created dictionary would have\r\n # key = filename and the value = picture label\r\n \r\n # Create an empty dictionary to hold pet labels\r\n petlabels_dic = dict()\r\n \r\n \r\n \r\n for idx in range(0, len(in_files), 1): \r\n if in_files[idx][0] != \".\":\r\n pet_image_name = in_files[idx].split(\"_\")\r\n # Check if the first character is uppercase letter. If it is, then lowercase that first character\r\n if pet_image_name[0].isupper() : \r\n pet_image_name = pet_image_name.lower()\r\n # Create a temporary label variable to hold pet label name\r\n pet_label = \" \"\r\n \r\n # Process each of the character strings(words) split by '_' in \r\n # the list pet_image_name\r\n for word in pet_image_name: \r\n if word.isalpha():\r\n pet_label += word + \" \"\r\n pet_label = pet_label.strip()\r\n if in_files[idx] not in petlabels_dic:\r\n petlabels_dic[in_files[idx]] = [pet_label]\r\n else: \r\n print(\" Warning: Duplicate files exist in dictionary\", in_files[idx])\r\n \r\n \r\n # Return dictionary of pet lables\r\n return(petlabels_dic)", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_image_labels_mapping(images_fp, labels_fp):\n name_map = {}\n\n for f in images_fp():\n image_name = f[0]['file']\n vars = {k.upper():v for k,v in f[0].items() if k!='file' }\n label_name = labels_fp.get_matching(**vars)[0]['file']\n name_map[image_name] = label_name\n return name_map", "def retrieve_labels(file, label_indices):\n\n\t# Initialize numpy matrix to store the images\n\tlabels = np.zeros((len(label_indices), 10))\n\n\twith open(file, \"rb\") as f:\n\t\t# Intialize counters\n\t\ti = 0\n\t\tlabel_number = 0\n\n\t\t# Read first byte\n\t\tbyte = f.read(1)\n\n\t\t# Find each image in the data file\n\t\tfor label_index in label_indices:\n\t\t\t# Read in bytes until you arrive at the label\n\t\t\twhile byte and (i < (label_index + 8)):\n\t\t\t\tbyte = f.read(1)\n\t\t\t\ti += 1\n\n\t\t\t# Store label value in numpy array\n\t\t\tvalue = int.from_bytes(byte, \"big\")\n\t\t\tlabels[label_number] = np.zeros(10)\n\t\t\tlabels[label_number, value] = 1\n\n\t\t\t# Increment to next label\n\t\t\tlabel_number += 1\n\n\treturn labels", "def 
get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def parse_labelfile(path):\n with open(path, \"r\") as FILE:\n lines = FILE.readlines()\n\n\n labels = {x.split(\":\")[0]: x.split(\":\")[1] for x in lines[1:]}\n\n for key in labels:\n labels[key] = np.array(labels[key].split(\",\")).astype(\"uint8\")\n\n return labels", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def create_labelmapDict_patch(list_all_images, path_dataset):\n list_all_classes = []\n for idx, name_image_ in enumerate(list_all_images):\n _, tail = os.path.split(name_image_)\n temp_obj = []\n name_file_xml_all = os.path.join(path_dataset, 'LABELS', tail[0:-3] + 'xml')\n if os.path.exists(name_file_xml_all):\n with tf.gfile.GFile(name_file_xml_all, 'rb') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n data = tfrecord_util.recursive_parse_xml_to_dict(xml)['annotation']\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ != 'INCOMPLETAS':\n list_all_classes.append(name_in_obj_)\n temp_obj.append(obj)\n # list_all_classes = unique_list(list_all_classes)\n list_all_classes = list(set(list_all_classes))\n list_all_classes.sort()\n list_all_classes.insert(0, 'background')\n labelmap_ = {el: k for k, el in enumerate(list_all_classes)}\n return labelmap_", "def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def extract_labels(filename, num_images):\n gt_imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n gt_patches = [img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, 0, False) for i in range(num_images)]\n data = numpy.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = numpy.asarray([value_to_class(numpy.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(numpy.float32)", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n 
training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_label(self, file, variable_name=\"group\"):\n data = scipy.io.loadmat(file)\n self.logger.info(\"loading mat file %s\", file)\n label = data[variable_name].todense().astype(np.int)\n label = np.array(label)\n print(label.shape, type(label), label.min(), label.max())\n return label", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def create_readable_names_for_imagenet_labels():\n\n base_url = 'http://cnbj1-fds.api.xiaomi.net/ml-datasets/imagenet/' # noqa\n synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)\n synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)\n\n filename, _ = urllib.urlretrieve(synset_url)\n synset_list = [s.strip() for s in open(filename).readlines()]\n num_synsets_in_ilsvrc = len(synset_list)\n assert num_synsets_in_ilsvrc == 1000\n\n filename, _ = urllib.urlretrieve(synset_to_human_url)\n synset_to_human_list = open(filename).readlines()\n num_synsets_in_all_imagenet = len(synset_to_human_list)\n assert num_synsets_in_all_imagenet == 21842\n\n synset_to_human = {}\n for s in synset_to_human_list:\n parts = s.strip().split('\\t')\n assert len(parts) == 2\n synset = parts[0]\n human = parts[1]\n synset_to_human[synset] = human\n\n label_index = 1\n labels_to_names = {0: 'background'}\n for synset in synset_list:\n name = synset_to_human[synset]\n labels_to_names[label_index] = name\n label_index += 1\n\n return labels_to_names", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def ExtractLabel(ImgName):\n # Each img has name notation \"*****a0X*\" where X is PlasticType\n PlasticType = ImgName[7] \n return {\n '1': 0, # PET\n '2': 1, # HDPE\n '4': 2, # LDPE\n '5': 3, # PP\n '6': 4, # PS\n '7': 5, # Other\n }[PlasticType]", "def read_image_with_label(dir, file):\n assert type(file) == str, 
\"File name is not string.\"\n f = os.path.join(dir, file)\n info = file.split(\"_\")\n try:\n label = [int(info[x]) for x in range(1, 3)]\n except:\n print(\"The format of file name is not correct.\")\n else:\n return Image.open(f), label", "def get_pet_labels(image_dir):\n # Create dictionary\n petlabels_dic = {}\n\n # Retrieve the filenames from folder pet_images/\n # Try to catch exceptions (folder does not exists, etc..)\n try:\n filename_list = listdir(image_dir)\n except:\n print('** Error: unable to list files in \"{}\" folder.'.format(image_dir))\n exit()\n else:\n for idx in range(0,len(filename_list)):\n #if filename_list[idx] not in petlabels_dic: # required? probably not\n # Remove extension from filename\n filename = filename_list[idx].split('.')[0]\n # Create a list of words from filename, removing digits\n filename_labels = list(filter(lambda label: label.isalpha(), filename.split('_')))\n # Create key->value item in dictonary\n petlabels_dic[filename_list[idx]] = [\" \".join(filename_labels).lower()]\n\n # Return dictionary\n return petlabels_dic", "def _read_color_labels(filename):\n line_parser = lambda line: (int(line.split(',')[0]), line.split(',')[-1])\n with open(filename, 'r') as labels:\n label_map = dict([line_parser(line.strip()) for line in labels])\n return label_map", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(10000)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def read_images(self, img_name, label_name):\n image_string = tf.read_file(img_name)\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n label_string = tf.read_file(label_name)\n label_decoded = tf.image.decode_jpeg(label_string, channels=1)\n return image_decoded, label_decoded", "def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)", "def _extract_labels(self, filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = self._read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return 
self._dense_to_one_hot(labels)\n return labels", "def unpack_labels(self, labels,\n is_box = False):\n unpacked_labels = {}\n count = 0\n for level in range(self.min_level, self.max_level + 1):\n feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)\n feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)\n steps = feat_size_y * feat_size_x * self.anchors_per_location\n if is_box:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [-1, 4])\n else:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [feat_size_y, feat_size_x, -1])\n count += steps\n return unpacked_labels", "def read_label_from_xml(label_path):\n labels = parseXML(label_path)\n label_dic = {}\n for label in labels:\n first_frame = label.firstFrame\n nframes = label.nFrames\n size = label.size\n obj_type = label.objectType\n for index, place, rotate in zip(range(first_frame, first_frame+nframes), label.trans, label.rots):\n if index in label_dic.keys():\n label_dic[index][\"place\"] = np.vstack((label_dic[index][\"place\"], place))\n label_dic[index][\"size\"] = np.vstack((label_dic[index][\"size\"], np.array(size)))\n label_dic[index][\"rotate\"] = np.vstack((label_dic[index][\"rotate\"], rotate))\n else:\n label_dic[index] = {}\n label_dic[index][\"place\"] = place\n label_dic[index][\"rotate\"] = rotate\n label_dic[index][\"size\"] = np.array(size)\n return label_dic, size", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. 
So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def read_labeled_image_list(image_list_file):\n f = open(image_list_file, 'r')\n filenames = []\n labels = []\n for line in f:\n filename, label = line[:-1].split(' ')\n filenames.append(filename)\n labels.append(int(label))\n return filenames, labels", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def read_label_data(mode, image_type):\n return np.loadtxt(parse_path(mode, image_type, True), dtype=int, delimiter='\\n')", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def getList(self):\n labelMap = {}\n imageMap = {}\n key = []\n index = 0\n\n for root, dirs, files in os.walk(self.path_data):\n for file in files:\n # If .png or .jpg file found then\n if file.endswith(tuple(config.imageFormat)):\n key.append(index)\n labelMap[index] = preprocessing.getLabel(file)\n imageMap[index] = os.path.join(root, file)\n\n index += 1\n\n else:\n continue\n\n return key, imageMap, labelMap", "def extract_labels(filename, num_images, starting_id, context_factor):\n gt_imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if 
os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n # it means that we base our labels only on the core of the patch, not including the contet added\n context_factor = 0\n gt_patches = [img_crop_context(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor) for i in range(num_images)]\n data = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = np.asarray([value_to_class(np.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(np.float32)", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def load_pixel_labels(pixel_labels_dir, photo_id):\n\n pixel_labels_path = os.path.join(pixel_labels_dir, '%s.npy' % photo_id)\n if not os.path.exists(pixel_labels_path):\n raise ValueError('Could not find ground truth labels at \"%s\"' % pixel_labels_path)\n\n return np.load(pixel_labels_path)", "def load_pixel_labels(pixel_labels_dir, photo_id):\n\n pixel_labels_path = os.path.join(pixel_labels_dir, '%s.npy' % photo_id)\n if not os.path.exists(pixel_labels_path):\n raise ValueError('Could not find ground truth labels at \"%s\"' % pixel_labels_path)\n\n return np.load(pixel_labels_path)", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n return labels", "def read_labeled_image_list(image_list_file):\n\tf = open(image_list_file, 'r')\n\tfilenames = []\n\tlabels = []\n\tfor line in f:\n\t\tline = line.rstrip('\\n')\n\n\t\tfilename, _, label = line.partition(LABEL_SEP)#line[:-1].split(LABEL_SEP)\n\t\tfilenames.append(filename)\n\t\tlabels.append(int(label))\n\t\t#print (filename+LABEL_SEP+\":) \"+label)\n\treturn filenames, labels", "def __init__(self, path, type = 'mrk') :\n stim = np.loadtxt(path, skiprows = 1, usecols = (0,1), dtype = np.dtype(int))\n labels = np.loadtxt(path, skiprows = 1, usecols = 2, dtype = np.dtype(str))\n\n self.dic = dict.fromkeys(labels)\n for key, _ in self.dic.items() : self.dic[key] = []\n for k in range(len(stim)) :\n self.dic[labels[k]].append(stim[k, :])\n return None", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n 
XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def read_rich_labels(path):\n\tlocation_dict = {}\n\twith open(os.path.join(path,'rich_labels.txt')) as f:\n\t\tcontent = f.readlines()\n\tfor line in content:\n\t\tlinecontent = line.split()\n\n\t\t# make sure each line is structured as follows:<image name> <latitude> <longitude>\n\t\tassert len(linecontent) >= 3, \"Unexpectedly short line in rich_labels.txt: \" + line\n\t\tif len(linecontent) > 3: \n\t\t\twarnings.warn('Unexpected line in rich_labels.txt: ' + line + \n\t\t\t \t\t\t '\\n Using first three words: ' + str(linecontent), stacklevel=0)\n\t\ttry:\n\t\t\tlocation_dict[linecontent[0]] = (float(linecontent[1]),float(linecontent[2]))\n\n\t\t\t# make sure you have latitude and longitude coordinates are not flipped\n\t\t\t# assuming that images are from North America\n\t\t\tassert float(linecontent[1]) <= float(linecontent[2])\n\n\t\texcept ValueError as e:\n\t\t\twarnings.warn(\"Unexpected lat/long in rich_labels.txt: \" + \n\t\t\t\t\t\t str(linecontent[1:3]), stacklevel=0)\n\treturn location_dict", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def _pickle_load(filename):\n with open(filename, 'rb') as f:\n save = pickle.load(f)\n image = save['image'].astype(np.float32)\n label = np.float32(save['label'])\n label = reformat_labels(label)\n return image, label", "def load_labels(path, encoding='utf-8'):\r\n with open(path, 'r', encoding=encoding) as f:\r\n lines = f.readlines()\r\n if not lines:\r\n return {}\r\n\r\n if lines[0].split(' ', maxsplit=1)[0].isdigit():\r\n pairs = [line.split(' ', maxsplit=1) for line in lines]\r\n return {int(index): label.strip() for index, label in pairs}\r\n else:\r\n return {index: line.strip() for index, line in enumerate(lines)}", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def load_label(path: str) -> dict:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist label {path}\")\n return None\n return np.load(path, allow_pickle=True).tolist()", "def read_label_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n ret = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if 
len(pair) == 2 and pair[0].strip().isdigit():\n ret[int(pair[0])] = pair[1].strip()\n else:\n ret[row_number] = pair[0].strip()\n return ret", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def get_labels(label_file):\n labels = None\n with open(label_file, 'r') as infile:\n reader = csv.reader(infile)\n labels = dict((rows[0], rows[1]) for rows in reader)\n return labels", "def get_img_labels(task, nb_img=None):\n # Read the csv file matching the ids of the images with the classes\n labels = OrderedDict()\n\n with open('data/' + ('id_train' if task == 'training' else 'sample_submission4') + '.csv', 'rb') as csvfile:\n rows = reader(csvfile, delimiter=',')\n rows.next() # Skip the header\n for row in rows:\n if nb_img is not None and len(labels) >= nb_img:\n break\n labels[row[0]] = int(row[1]) # Integer conversion of the labels\n\n return labels", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = 
bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return dense_to_one_hot(labels)", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def load_leaf():\n # List of image file names\n dataset_directory = os.path.join(root_directory,'Leaf_2')\n filenames = os.listdir(dataset_directory)\n filenames.sort()\n\n # List of bitmap Shapes; each row is a Image of the dataset\n data = []\n\n # Numpy array of labels associated to each class of image\n target = np.empty([len(filenames), ])\n\n previous_label = ''\n class_num = -1\n index = 0\n\n for index, filename in enumerate(filenames):\n data.append(Bitmap(io.imread(os.path.join(dataset_directory, filename))))\n file_label = filename.split('-')[0]\n\n if(previous_label != file_label):\n previous_label = file_label\n class_num += 1\n target[index] = class_num\n else:\n target[index] = class_num\n\n return {'bitmaps': data, 'targets': target}", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def extract_labels(filename, one_hot=False):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %(magic, filename))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels)\n\t\treturn labels", "def label_mapping(filename):\n\n\t\n\n\n\twith open(filename, 'r') as infile:\n\t\treader = csv.reader(infile)\n\t\tnext(reader, None) # ignore first line since they're column labels\n\n\t\t#filename, artist, title, style, genre, date\n\t\tfor line in reader:\n\t\t\timg = line[0]\n\t\t\tartist = line[1]\n\t\t\tstyle = line[3]\n\t\t\tgenre = line[4]\n\t\t\tdate = re.findall(r'\\d+', line[5]) #parse any unwanted stuff\n\n\t\t\t#img and artist fields always present, no need to check\n\t\t\tartist_labels[img] = artist\n\n\n\t\t\tif style != '' and style in style_check:\n\t\t\t\t#if sum(x == style for x in style_labels.values()) < max_examples: # avoid imbalance\n\t\t\t\tstyle_labels[img] = style\n\n\n\t\t\tif genre != '' and genre in genre_check:\n\t\t\t\t#if sum(x == genre for x in genre_labels.values()) < max_examples:\n\t\t\t\tgenre_labels[img] = genre\n\n\n\t\t\tif len(date) > 0:\n\t\t\t\tbucket_len = 10 #buckets of 10 years\n\t\t\t\tbucket = (int(date[0]) // bucket_len) * bucket_len \n\t\t\t\tperiod = str(bucket) + '-' + str(bucket + (bucket_len - 1))\n\n\t\t\t\tif period in date_check:\n\t\t\t\t\t#if sum(x == period for x in date_labels.values()) <= 
max_examples:\n\t\t\t\t\tdate_labels[img] = period #parsed_date", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def load_letter(folder,label,image_size=28,sample_num=-1):\n\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=image_data_type)\n num_images = 0\n if sample_num == -1:\n sample_num = len(image_files)\n for image in image_files:\n image_file = os.path.join(folder, image)\n try:\n image_data = ndimage.imread(image_file).astype(image_data_type)\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n if num_images >= sample_num:\n break\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n\n dataset = dataset[0:num_images, :, :]\n data_label = np.ndarray(shape=(num_images), dtype=np.int8)\n data_label.fill(label)\n return dataset,data_label", "def extract_labels(filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)[0]\n #print('check', magic, num_items)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels)\n return labels", "def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = 
os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels", "def load_matrix(self, src_dir, key_word=\"funneled\"):\r\n X = []\r\n Y = []\r\n label = 0\r\n for root, dirs, files in os.walk(src_dir):\r\n if files != []:\r\n for file in files:\r\n if key_word in file:\r\n img = cv2.imread(os.path.join(root, file), cv2.IMREAD_GRAYSCALE)\r\n min_value = np.min(img)\r\n max_value = np.max(img)\r\n X.append((img.flatten() - min_value)/(max_value - min_value)) # Normalize the data to [0, 1]\r\n Y.append(label)\r\n label +=1\r\n \r\n return dict(X = np.asarray(X), \r\n Y = np.asarray(Y))", "def createDictionaryFromFile(inputfile):\n logger.info('loading file: %s' % inputfile)\n dic = {}\n with open(inputfile) as fin:\n for n, line in enumerate(fin, start=1):\n arr = line.strip().split()\n path = arr[0]\n\n labels = []\n for label in arr[1:]:\n labels.append(ast.literal_eval(label))\n\n cpath = path.split('/')\n id_img = int(cpath[-1].replace('.jpg', ''))\n size_img = cpath[-2]\n activity = cpath[-3]\n id_data = int((cpath[-4])[-1])\n home = '/'.join(cpath[:-4])\n\n if dic.has_key(id_data):\n if dic[id_data].has_key(activity):\n if dic[id_data][activity].has_key(size_img):\n dic[id_data][activity][size_img][id_img] = labels\n else:\n dic[id_data][activity][size_img] = {id_img: labels}\n else:\n dic[id_data][activity] = {size_img: {id_img: labels}}\n else:\n dic[id_data] = {activity: {size_img: {id_img: labels}}}\n return n, home, dic", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def hume_matfile_loader(matfile_path):\n mat_struct = loadmat(matfile_path)\n\n # build a list of keys and values for each entry in the structure\n vals = mat_struct['stageData'][0, 0] # <-- set the array you want to access.\n keys = mat_struct['stageData'][0, 0].dtype.descr\n\n # Assemble the keys and values into variables with the same name as that used in MATLAB\n mat_dict = {}\n for i in range(len(keys)):\n key = keys[i][0]\n if len(vals[key].shape) > 1 and vals[key].shape[0] > vals[key].shape[1]:\n vals[key] = vals[key].T\n if len(vals[key][0]) > 1:\n val = np.squeeze(vals[key][0])\n else:\n val = np.squeeze(vals[key][0][0]) # squeeze is used to covert matlat (1,n) 
arrays into numpy (1,) arrays.\n mat_dict[key] = val\n\n return mat_dict", "def get_output(path, label_file = None):\n img_id = path.split('/')[-1]\n labels = label_file.loc[img_id].values\n return labels", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def read_dataset(data_txt_file, image_data_path):\n data = {}\n data['image'] = []\n data['label'] = []\n\n indexFile = open(data_txt_file, 'r')\n for sample in indexFile:\n sample = sample.split(',')\n\n _id = sample[0]\n label = int(sample[1])\n imageData = io.imread(image_data_path+_id+'.jpg')\n\n data['label'].append(label)\n data['image'].append(imageData)\n\n data['image'] = np.array(data['image'])\n data['label'] = np.array(data['label'])\n\n return data", "def read_data(case_dir):\n dict_images = dict()\n list_files = ['MR_512.nii.gz', 'landmarks_512.csv', ]\n # In fact, there is no Mask during inference, so we cannot load it.\n\n for file_name in list_files:\n file_path = case_dir + '/' + file_name\n assert os.path.exists(file_path), case_dir + ' does not exist!'\n\n if file_name.split('.')[-1] == 'csv':\n landmarks = pd.read_csv(file_path)\n dict_images['list_landmarks'] = landmark_extractor(landmarks)\n elif file_name.split('.')[0].split('_')[0] == 'MR':\n dict_images['MR'] = sitk.ReadImage(file_path, sitk.sitkFloat32)\n dict_images['MR'] = sitk.GetArrayFromImage(dict_images['MR'])[np.newaxis, :, :, :]\n elif file_name.split('.')[0].split('_')[0] == 'Mask':\n dict_images['Mask'] = sitk.ReadImage(file_path, sitk.sitkInt16)\n dict_images['Mask'] = sitk.GetArrayFromImage(dict_images['Mask'])[np.newaxis, :, :, :]\n\n return dict_images", "def get_label_dict(self):\n with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\n\n inverse_label_dict = json.loads(label_file)\n label_dict = {int(value): key for key,\n value in inverse_label_dict.items()}\n return label_dict", "def get_labels(self):\n\n print 'Loading label data from', self.label_file, '...'\n labels = {}\n with open(self.label_file, 'rb') as f:\n f.next() # skip header line\n for line in f:\n index, answer = line.rstrip('\\n').split(',')\n labels[index] = answer\n\n return labels", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = 
lbl[ind[i]]\n\n return images, labels", "def build_features_dict(image, image_id, filename, image_format=None,\n bboxes=None, masks=None, label_ids=None,\n label_names=None, masks_format=\"png\"):\n\n # Add channel dimension if needed.\n if len(image.shape) == 3:\n pass\n elif len(image.shape) == 2:\n image = np.expand_dims(image, -1)\n else:\n raise Exception(f\"Wrong image shape: {image.shape}\")\n\n # Get image shape.\n image_width, image_height, image_channel = image.shape\n\n # Encode image.\n image_encoded = imaging.encode_image(image, image_format)\n\n # Create te feature dict.\n feature_dict = {}\n\n # Image features\n feature_dict['image_height'] = int64_feature(image_height)\n feature_dict['image_width'] = int64_feature(image_width)\n feature_dict['image_channel'] = int64_feature(image_channel)\n feature_dict['image_filename'] = bytes_feature(filename.encode('utf8'))\n feature_dict['image_id'] = bytes_feature(str(image_id).encode('utf8'))\n feature_dict['image_encoded'] = bytes_feature(image_encoded.numpy())\n feature_dict['image_format'] = bytes_feature(image_format.encode('utf8'))\n\n # Object features\n if bboxes is not None:\n if bboxes.shape[0] > 0:\n bboxes_x = bboxes[:, 0]\n bboxes_y = bboxes[:, 1]\n bboxes_width = bboxes[:, 2]\n bboxes_height = bboxes[:, 3]\n else:\n bboxes_x = []\n bboxes_y = []\n bboxes_width = []\n bboxes_height = []\n\n feature_dict['bboxes_x'] = float_list_feature(bboxes_x)\n feature_dict['bboxes_y'] = float_list_feature(bboxes_y)\n feature_dict['bboxes_width'] = float_list_feature(bboxes_width)\n feature_dict['bboxes_height'] = float_list_feature(bboxes_height)\n\n if label_ids is not None:\n feature_dict['label_ids'] = int64_list_feature(label_ids)\n\n if label_names is not None:\n feature_dict['label_names'] = bytes_list_feature(label_names)\n\n if masks is not None:\n # Encode masks.\n masks_encoded = []\n for mask in masks:\n mask = image = np.expand_dims(mask, -1)\n mask_encoded = imaging.encode_image(mask, masks_format)\n masks_encoded.append(mask_encoded.numpy())\n\n feature_dict['masks_encoded'] = bytes_list_feature(masks_encoded)\n feature_dict['masks_format'] = bytes_feature(masks_format.encode(\"utf8\"))\n\n return feature_dict", "def label_visualize(img_dir):\n img = scipy.misc.imread(img_dir).astype(np.uint8)\n yo = np.nonzero(img == 1)\n visual = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for i in range(0, 34):\n index = np.nonzero(img == i)\n visual[index + (0,)] = labels[i][0]\n visual[index + (1,)] = labels[i][1]\n visual[index + (2,)] = labels[i][2]\n\n scipy.misc.imsave('./' + img_dir.split('/')[-1], visual)", "def get_pet_labels(image_dir):\n results_dic = dict()\n \n# # Retrieves the file names from the folder specified as 'image_dir' \n filenames_list = listdir(image_dir)\n \n# # Processes the filenames to create the pet image labels\n# # Retrieves the filenames from folder pet_images/\n for i in range (0, len(filenames_list), 1):\n# # Skips file if starts with . 
(like .DS_Store of Mac OSX) because it \n# # isn't an pet image file\n if filenames_list[i][0] != \".\":\n# # Reads respectively indexed element from filenames_list into temporary string variable 'pet_image' \n pet_image = filenames_list[i]\n# # Sets all characters in 'pet_image' to lower case \n pet_image_lower = pet_image.lower()\n# # Creates list called 'pet_image_word_list' that contains every element in pet_image_lower seperated by '_'\n pet_image_word_list = pet_image_lower.split(\"_\")\n# # Creates temporary variable 'pet_label' to hold pet label name extracted starting as empty string\n pet_image_alpha = \"\"\n# # Iterates through every word in 'pet_image_word_list' and appends word to 'pet_label_alpha' only if word consists \n# # purely of alphabetic characters \n for word in pet_image_word_list:\n if word.isalpha():\n pet_image_alpha += word + \" \"\n# # Removes possible leading or trailing whitespace characters from 'pet_pet_image_alpha' and add stores final label as 'pet_label' \n pet_label = pet_image_alpha.strip()\n\n# # Adds the original filename as 'key' and the created pet_label as 'value' to the 'results_dic' dictionary if 'key' does \n# # not yet exist in 'results_dic', otherwise print Warning message \n if filenames_list[i] not in results_dic:\n results_dic[filenames_list[i]] = [pet_label]\n else:\n print(\"** Warning: Key = \", filenames_list[i], \" already in 'results_dic' with value = \", results_dic[filenames_list[i]])\n \n# # Iterates through the 'results_dic' dictionary and prints its keys and their associated values\n print(\"\\nPrinting: All 'key' - 'value' pairs in dictionary results_dic: \")\n for key in results_dic:\n print(\"Filename = \", key, \" Pet Label = \", results_dic[key])\n \n# # Returns results_dic\n return results_dic", "def decode_labels(mask, num_images=1, num_classes=21, task='seg'):\n n, h, w, c = mask.shape\n assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' 
% (n, num_images)\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n if task == 'normal':\n outputs[i] = mask[i]\n elif task == 'seg':\n img = Image.new('RGB', (w, h), (255, 255, 255)) # unlabeled part is white (255, 255, 255)\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :, 0]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs[i] = np.array(img)\n else:\n raise Exception('task name is not recognized!')\n\n return outputs", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def load_imagenet(directory):\n path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'\n train_labels = os.listdir(path_train)\n train_data = []\n for label in train_labels:\n imgs_path = os.path.join(path_train, label)\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_labels = os.listdir(path_val)\n test_data = []\n for label in test_labels:\n imgs_path = os.path.join(path_val, label)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, imgs_path, img_name, img, imgs\n \n return train_data, train_labels, test_data, test_labels", "def load_data(data_file):\n data = pickle.load(open(data_file, \"rb\"))\n images = data[\"images\"]\n labels = data[\"labels\"]\n\n return images, labels" ]
[ "0.7442581", "0.67145514", "0.6680717", "0.66700083", "0.6651974", "0.6599294", "0.65706545", "0.6568262", "0.65624034", "0.65466106", "0.6527709", "0.65229243", "0.65100825", "0.6500305", "0.649048", "0.6466592", "0.6466018", "0.6442053", "0.6429563", "0.6409631", "0.63989353", "0.6398331", "0.6392108", "0.63798136", "0.63793224", "0.63647455", "0.6362771", "0.63621897", "0.6351548", "0.6340121", "0.6311418", "0.63105685", "0.6301381", "0.6298731", "0.629819", "0.62820655", "0.6281838", "0.6269306", "0.6267734", "0.62660563", "0.62660563", "0.62610793", "0.62308514", "0.62302977", "0.62213755", "0.62192297", "0.62170714", "0.62042874", "0.6204238", "0.6200295", "0.6173856", "0.6173856", "0.61581635", "0.61481947", "0.61384934", "0.61376095", "0.61330664", "0.6125069", "0.61185175", "0.61180514", "0.6090818", "0.6082656", "0.6071819", "0.6068334", "0.6067384", "0.6058544", "0.60574967", "0.6052241", "0.6052068", "0.6048583", "0.60455567", "0.60393196", "0.6034254", "0.6014672", "0.60122997", "0.5985232", "0.5973246", "0.5965362", "0.59591544", "0.59546065", "0.59541255", "0.59513205", "0.5950326", "0.59467643", "0.5935829", "0.5924551", "0.5921776", "0.5919206", "0.59134555", "0.5908433", "0.5904006", "0.58992493", "0.58726734", "0.58699334", "0.58655876", "0.5853104", "0.58282125", "0.5825043", "0.5816673", "0.5813983" ]
0.8415674
0
ref CLRS pg326, solution to the basic supply chain problem using the book notation for variable names
def fastestWay( a, t, e, x, n ):
    import pdb;pdb.set_trace()
    f1.append( ( e[0] , 1 ) )
    f2.append( ( e[1] , 2 ) )
    for i in xrange(n):
        f11 = f1[i][0]+a[0][i]
        f12 = f2[i][0]+a[1][i]+t[1][i+1]
        f22 = f2[i][0]+a[1][i]
        f21 = f1[i][0]+a[0][i]+t[0][i+1]
        f1.append( ( min( f11, f12 ), 1 ) if f11 < f12 else ( min( f11, f12 ), 2 ) )
        f2.append( ( min( f21, f22 ), 2 ) if f22 < f21 else ( min( f22, f21 ), 1 ) )
    f1x, f2x = f1[n][0]+x[0], f2[n][0]+x[1]
    return ( min( f1x, f2x ) , f1 ) if f1x < f2x else ( min( f1x, f2x ), f2 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_39():\r\n pass", "def exercise_b2_113():\r\n pass", "def exercise_b2_93():\r\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_106():\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_43():\r\n pass", "def exercise_b2_69():\r\n pass", "def name_supply(stems=string.ascii_lowercase, drop_zero=True):\n k = 0\n while 1:\n for a in stems:\n yield a+str(k) if (k or not drop_zero) else a\n k = k+1", "def exercise_b2_107():\r\n pass", "def _block_name_base(stage, block):\n if block < 27:\n block = \"%c\" % (block + 97) # 97 is the ascii number for lowercase 'a'\n conv_name_base = \"res\" + str(stage) + block + \"_branch\"\n bn_name_base = \"bn\" + str(stage) + block + \"_branch\"\n return conv_name_base, bn_name_base", "def exercise_b2_82():\r\n pass", "def exercise_b2_70():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_95():\r\n pass", "def exercise_b2_26():\r\n pass", "def chain_full_name(alignment, chain):\n return '%s_%s' % (alignment, chain)", "def exercise_b2_56():\r\n pass", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def main():\n var_name = prompt_for_var_name()\n var_words = normalize_var_name(var_name)\n for case in CASES:\n out_var_name = render_case(var_words, case)\n print(out_var_name)", "def getParseParams(residue, name):\n atomname = name\n resname = residue.name\n\n # Terminal/Water Substitutions\n\n nterm = residue.get(\"isNterm\")\n cterm = residue.get(\"isCterm\")\n if nterm and resname != \"ACE\":\n if resname == \"PRO\" and nterm == 2:\n resname = \"PR+\"\n if atomname == \"H2\":\n atomname = \"HN1\"\n elif atomname == \"H3\":\n atomname = \"HN2\"\n elif resname == \"PRO\" and nterm == 1:\n resname = \"PRN\"\n if atomname == \"H2\" or atomname == \"H3\": atomname = \"HN\"\n elif nterm == 2: # Neutral\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\", \"C\", \"O\"]:\n resname = \"BKN\"\n if atomname == \"H\":\n atomname = \"H1\"\n if atomname == 'H3':\n atomname = 'H2'\n elif nterm == 
3: # Positive\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\", \"C\", \"O\"]:\n resname = \"BK+\"\n if atomname == \"H\": atomname = \"H1\"\n elif cterm:\n if atomname == \"O\":\n atomname = \"O1\"\n elif atomname == \"OXT\":\n atomname = \"O2\"\n if cterm == 1 and atomname in [\"N\", \"H\", \"HA\", \"CA\", \"C\", \"O1\", \"O2\"]:\n resname = \"BK-\"\n elif cterm == 2 and atomname in [\"N\", \"H\", \"HA\", \"CA\", \"C\", \"O1\", \"O2\", \"HO\"]:\n if atomname == \"HO\": atomname = \"H2\"\n resname = \"BKC\"\n # print 'Cterm resname is',resname\n elif residue.get(\"type\") == 3:\n resname = \"H2O\"\n if atomname == \"O\":\n atomname = \"OH\"\n elif atomname == \"H1\":\n atomname = \"HH1\"\n elif atomname == \"H2\":\n atomname = \"HH2\"\n\n # Residue Substitutions\n if resname == \"HSD\":\n resname = \"HID\"\n elif resname in [\"HIE\", \"HSE\"]:\n resname = \"HIS\"\n elif resname in [\"HIP\", \"HSP\"]:\n resname = \"HI+\"\n elif resname == \"ILE\":\n if atomname == \"HG12\":\n atomname = \"HG11\"\n elif atomname == \"HG13\":\n atomname = \"HG12\"\n elif atomname == \"CD\":\n atomname = \"CD1\"\n elif resname == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CSS\"\n #\n # Histidine\n #\n elif resname == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HI+\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HID\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HIS\"\n elif resname == \"GLU\" or resname == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n resname = \"GL0\"\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"GL0\"\n elif resname == \"ASP\" or resname == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n resname = \"AS0\"\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n elif \"HD2\" in residue.get(\"map\"):\n resname = \"AS0\"\n elif resname == \"ACE\":\n if atomname == \"HH31\":\n atomname = \"HA1\"\n elif atomname == \"HH32\":\n atomname = \"HA2\"\n elif atomname == \"HH33\":\n atomname = \"HA3\"\n elif atomname == \"CH3\":\n atomname = \"CA\"\n elif resname == \"TYR\":\n if not \"HH\" in residue.get(\"map\"):\n resname = \"TYM\"\n elif resname == \"TYM\":\n resname = \"TY-\"\n elif resname == \"CYM\":\n resname = \"CY-\"\n elif resname == \"LYN\":\n resname = \"LY0\"\n #\n # Neutral LYS and neutral ARG detection based on hydrogens - added by Jens\n #\n elif resname == \"LYS\":\n if not \"HZ3\" in residue.get(\"map\"):\n resname = \"LY0\"\n elif resname == \"ARG\":\n if not \"HE\" in residue.get(\"map\"):\n resname = \"AR0\"\n elif resname == \"NME\":\n resname = \"N-M\"\n if atomname == \"CH3\":\n atomname = \"CA\"\n elif atomname == \"H\":\n atomname = \"H1\"\n elif atomname.startswith(\"HH\"):\n atomname = \"HA\" + atomname[-1]\n\n # Hydrogen Substitutions\n\n if atomname == \"H\":\n atomname = \"HN\"\n elif atomname == \"HA2\":\n atomname = \"HA1\"\n elif atomname == \"HA3\":\n atomname = \"HA2\"\n elif atomname == \"HB2\" and resname not in [\"ALA\"]:\n atomname = \"HB1\"\n elif atomname == \"HB3\" and resname not in [\"ALA\"]:\n atomname = \"HB2\"\n elif atomname == \"HD2\" and resname not in [\"HIS\", \"HI+\", \"HID\", \"AS0\"]:\n atomname = \"HD1\"\n elif atomname == \"HD3\" and resname not in [\"HIS\", \"HI+\", \"HID\"]:\n atomname = \"HD2\"\n 
elif atomname == \"HE2\" and resname not in [\"TRP\", \"HIS\", \"HI+\", \"HID\", \"GL0\"]:\n atomname = \"HE1\"\n elif atomname == \"HE3\" and resname not in [\"TRP\", \"HIS\", \"HI+\", \"HID\"]:\n atomname = \"HE2\"\n elif atomname == \"HG2\":\n atomname = \"HG1\"\n elif atomname == \"HG3\":\n atomname = \"HG2\"\n elif atomname == \"HZ2\" and resname == \"LY0\":\n atomname = \"HZ1\"\n elif atomname == \"HZ3\" and resname == \"LY0\":\n atomname = \"HZ2\"\n\n return resname, atomname", "def test_expanded_name( self ):\n\t\tself.doBasicTest(\n\t\t\t'''s := something +\n\t\t\t>something< := r\n\t\t\tr := [ab]\n\t\t\tv := [c]\n\t\t\t''',\n\t\t\t's',\n\t\t\t'abammmab',\n\t\t\t(1,[\n\t\t\t\t('r',0,1, NullResult),\n\t\t\t\t('r',1,2, NullResult),\n\t\t\t\t('r',2,3, NullResult),\n\t\t\t],3)\n\t\t)", "def test_bnd_mate_name():\n\n mate = bnd_mate_name(ALT, \"2\")\n assert mate == \"17\"", "def test_initialization_of_TCRsubset_alpha_beta_case_chain_names():\n assert isinstance(dist_a_subset, pd.DataFrame)\n assert isinstance(dist_b_subset, pd.DataFrame)\n assert isinstance(clone_df_subset, pd.DataFrame)\n TCRsubset(clone_df = clone_df_subset, \n organism = \"mouse\",\n epitopes = [\"PA\"] ,\n epitope = \"PA\",\n chains = [\"alpha\",\"beta\"],\n dist_a = dist_a_subset,\n dist_b = dist_b_subset)", "def part_2():\n instructions[\"b\"] = 46065\n print(\"Part 2: \" + str(evaluate(\"a\")))", "def name(n, b=\"s\"):\n print(\"This is the \" + n + b)\n return print(\"Hi\")", "def var():\n def _var(quoted_name):\n name = quoted_name.subexpression.name\n if (value := get_name(name)) is not None:\n return value\n else:\n raise TypeError(f\"Binding {name} not found\")\n yield (\"(λ &[name] . any)\", _var)", "def _expandVariables (self, st : String) -> String:\n\n Logging.trace(\">>: %r\", st)\n cls = self.__class__\n\n # collect identifiers embedded in value and replace them by\n # their value\n ParseState_inLimbo = 0\n ParseState_inString = 1\n ParseState_inEscape = 2\n ParseState_inIdentifier = 3\n parseStateToString = { 0 : \"-\", 1 : \"S\",\n 2 : cls._escapeCharacter, 3 : \"I\" }\n\n parseState = ParseState_inLimbo\n result = \"\"\n identifier = \"\"\n fsaTrace = \"\"\n\n for ch in st:\n # process finite state automaton with three states based\n # on next character in string\n fsaTrace += (iif(fsaTrace == \"\", \"\", \" \")\n + \"[%s] %s\" % (parseStateToString[parseState], ch))\n\n if parseState == ParseState_inLimbo:\n if cls._identifierCharRegExp.search(ch):\n identifier = ch\n parseState = ParseState_inIdentifier\n else:\n result += ch\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inString\n elif parseState == ParseState_inString:\n result += ch\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inLimbo\n elif ch == cls._escapeCharacter:\n parseState = ParseState_inEscape\n elif parseState == ParseState_inEscape:\n result += ch\n parseState = ParseState_inString\n elif parseState == ParseState_inIdentifier:\n if cls._identifierCharRegExp.search(ch):\n identifier += ch\n else:\n identifierValue = self._findIdentifierValue(identifier)\n result += identifierValue\n result += ch\n parseState = iif(ch == cls._doubleQuoteCharacter,\n ParseState_inString, ParseState_inLimbo)\n\n if parseState == ParseState_inIdentifier:\n identifierValue = self._findIdentifierValue(identifier)\n result += identifierValue\n \n Logging.trace(\"--: accumulatedFSATrace = %s\", fsaTrace)\n Logging.trace(\"<<: %r\", result)\n return result", "def psea(pname): # -> str:\n ...", "def test_get_alias():\n c 
= Curve(data=np.linspace(1, 20, 2), mnemonic='DT')\n alias = {'Sonic': ['DT', 'foo']}\n assert c.get_alias(alias) == ['Sonic']", "def printname(bruce):", "def _make_dijet_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('dijet')\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>djmass)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1eta)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'j1et': ('100', 'inf'),\n 'j2et': ('100', 'inf'),\n 'j1eta': ('0', '320'),\n 'j2eta': ('0', '320'),\n 'djmass': ('1000', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n combgen(\n [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n ]\n \n dijet(\n [(%(djmasslo).0fdjmass)])\n simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])\n )\"\"\" % argvals", "def procs2(variable=\"\", category=\"\"):\n assert variable\n assert category\n out = {}\n\n # out = {\"VV\": [\"*VV\", \"*singleT\"],\n # \"ZLL\": [\"*ZLL\"],\n # }\n\n # out[\"W+QCD\"] = [\"W\", \"QCD\"]\n\n return out", "def symbolic_objective(ingredients) -> Tuple[List[sp.Symbol], sp.Eq]:\n assignments = sp.symbols(' '.join(ingredients.keys()))\n\n # Skip negative logic due to differentiability requirements\n objective = 1\n for attribute in filter(lambda x: x != \"calories\", next(iter(ingredients.values())).keys()):\n objective *= sum(ingredients[str(x)][attribute] * x for x in assignments)\n\n return assignments, objective", "def __init__(self,\n point_size: int,\n max_levels=6,\n min_levels=3,\n mutation_prob=0.5\n ) -> None:\n self.rec_refs = {}\n self.mutation_prob = mutation_prob\n\n vars1 = []\n vars2 = []\n for i in range(point_size):\n vars1 += [f\"X1[{i}]\"]\n\n for i in range(point_size):\n vars2 += [f\"X2[{i}]\"]\n\n self.grammar = {\n **{f\"<expr_{i}>\": [f\"<expr_{i+1}> <op> <expr_{i+1}>\", f\"<func> ( <expr_{i+1}> <op> <expr_{i+1}> )\"] for i in range(min_levels)},\n **{f\"<expr_{min_levels + i}>\": [f\"<expr_{min_levels + i+1}> <op> <expr_{min_levels + i+1}>\", f\"<func> ( <expr_{min_levels + i + 1}> <op> <expr_{min_levels + i + 1}> )\", \"<term>\"] for i in range(max_levels - min_levels)},\n f\"<expr_{max_levels}>\": [\"<term_1> <op> <term_2>\", \"<term_2> <op> <term_1>\"],\n \"<term>\": [\n \"<term_1>\", \"<term_2>\"\n ],\n \"<term_1>\": [\n \"<var_1>\",\n \"<pre-op> ( <var_1> )\",\n ],\n \"<term_2>\": [\n \"<var_2>\",\n \"<pre-op> ( <var_2> )\",\n ],\n \"<pre-op>\": [\n \"1/\",\n \"-\",\n \"+\",\n \"abs\",\n \"numpy.math.sqrt\"\n ],\n \"<func>\": [\n \"abs\",\n \"\"\n ],\n \"<op>\": [\n \"+\",\n \"*\",\n \"-\",\n \"/\",\n ],\n \"<var_1>\": vars1,\n \"<var_2>\": vars2,\n }\n\n self.non_terminals = sorted(self.grammar.keys())\n\n # these two lines are described in the pseudocode of the reference paper\n 
rec_refs = self.countRecursiveReferences()\n self.ref_count = {\n key: self.findReferences(key, *rec_refs) for key in self.grammar.keys()\n }", "def exercise_b2_86():\r\n pass", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def main():\n\n args = get_args()\n codons = {\n 'A': 4, 'C': 2, 'D': 2, 'E': 2, 'F': 2, 'G': 4, 'H': 2, 'I': 3,\n 'K': 2, 'L': 6, 'M': 1, 'N': 2, 'P': 4, 'Q': 2, 'R': 6, 'S': 6,\n 'T': 4, 'V': 4, 'W': 1, 'Y': 2, '*': 3,\n }\n print(product(map(codons.get, args.protein + '*')) % args.modulo)", "def _optionvarkey(name):\n return \"ragdoll%s\" % (name[0].upper() + name[1:])", "def exercise_2b():\n\n return", "def main(supply):\n\n # Define four parallel arrays: start_nodes, end_nodes, capacities, and unit costs\n # between each pair. For instance, the arc from node 0 to node 1 has a\n # capacity of 15 and a unit cost of 4.\n\n start_nodes = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 8, 8, 8, 8, 8, 9, 9, 9]\n end_nodes = [8, 2, 4, 6, 5, 4, 7, 6, 9, 8, 9, 0, 3, 4, 2, 5, 1, 0, 2, 5, 1, 8, 3, 4, 1, 0, 8, 1, 1, 0, 9, 5, 6, 1, 8, 2]\n capacities = [23, 10, 25, 15, 17, 14, 10, 21, 17, 11, 22, 27, 14, 6, 19, 9, 11, 8, 29, 16, 22, 29, 20, 13, 18, 14, 20, 25, 13, 8, 10, 24, 5, 9, 20, 28]\n unit_costs = [6, 9, 7, 8, 8, 5, 8, 5, 6, 9, 6, 5, 6, 6, 9, 7, 8, 6, 9, 6, 5, 5, 8, 7, 5, 8, 7, 9, 7, 6, 9, 6, 5, 5, 6, 7]\n\n # Define an array of supplies at each node.\n supplies = supply\n\n\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n\n # Add each arc.\n for i in range(0, len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],\n capacities[i], unit_costs[i])\n\n # Add node supplies.\n\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n\n\n # Find the minimum cost flow between node 0 and node 4.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n flag = 1\n optimal_flows = np.zeros(36)\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # save answer to the variable\n optimal_flows[i] = min_cost_flow.Flow(i)\n return flag, optimal_flows\n else:\n print('There was an issue with the min cost flow input.')\n flag = 0\n return flag, 0", "def __init__(self, name):\n self.name = name\n self.difficulty = 0\n self.description = \"\"\n self.prerequisites = {}\n self.cost = {}\n self.effects = {}\n self.strings = {}", "def __init__(self):\n self.name = ''\n 
self.variables = []\n self.assumptions = []\n self.guarantees = []", "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def getAmberParams(residue, name):\n atomname = name\n type = residue.get(\"type\")\n if type == 4:\n resname = residue.get(\"naname\")\n else:\n resname = residue.get(\"name\")\n\n # Residue Substitutions\n\n if residue.get(\"name\") == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CYX\"\n elif residue.get(\"name\") == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HIP\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HID\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HIE\"\n else:\n resname = \"HID\" # Default for no hydrogens\n elif residue.get(\"name\") == \"HSP\":\n resname = \"HIP\"\n elif residue.get(\"name\") == \"HSE\":\n resname = \"HIE\"\n elif residue.get(\"name\") == \"HSD\":\n resname = \"HID\"\n elif residue.get(\"name\") == \"GLU\" or residue.get(\"name\") == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n resname = \"GLH\"\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"GLH\"\n elif residue.get(\"name\") == \"ASP\" or residue.get(\"name\") == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n resname = \"ASH\"\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n elif \"HD2\" in residue.get(\"map\"):\n resname = \"ASH\"\n\n if residue.get(\"isCterm\"):\n resname = \"C\" + resname\n elif residue.get(\"isNterm\"):\n resname = \"N\" + resname\n\n # Atom Substitutions\n\n if resname == \"WAT\":\n if atomname == \"O\":\n atomname = \"OW\"\n elif atomname == \"H1\":\n atomname = \"HW\"\n elif atomname == \"H2\":\n atomname = \"HW\"\n elif resname == \"ILE\":\n if atomname == \"CD\": atomname = \"CD1\"\n if resname[0] == \"N\" and resname != \"NME\": # N-terminal\n if atomname == \"H\": atomname = \"H1\"\n if (resname == \"CCYS\" or resname == \"NCYS\") and atomname == \"HG\": atomname = \"HSG\"\n if resname == \"CYM\" and atomname == \"H\": atomname = \"HN\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN2\":\n atomname = \"H2\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN1\":\n atomname = \"H3\"\n return resname, atomname", "def linenames():\n linenamesdic = {}\n\n linenamesdic['ovi1'] = ['OVI $\\\\lambda$1032' , 1031.9261, 'right' , 'Morton1991tab2']\n linenamesdic['ovi2'] = ['OVI $\\\\lambda$1038' , 1037.6167, 'left' , 'Morton1991tab2']\n linenamesdic['lyb'] = ['Ly$\\\\beta$ $\\\\lambda$1025' , 1025.7219, 'right' , 'Morton1991tab5']\n linenamesdic['lya'] = ['Ly$\\\\alpha$ $\\\\lambda$1216' , 1215.6737, 'right' , 'Morton1991tab5']\n linenamesdic[ 'NV1240'] = ['NV $\\\\lambda$1239' , 1238.821 , 'right' , 'Morton1991tab5']\n linenamesdic['nv2'] = ['NV $\\\\lambda$1243' , 1242.804 , 'left' , 'Morton1991tab5']\n linenamesdic['cii'] = ['CII $\\\\lambda$1336' , 1335.6627, 'right' , 'Morton1991tab5']\n linenamesdic['Siiv1'] = ['SiIV $\\\\lambda$1394' , 1393.755 , 'right' , 'Morton1991tab5']\n linenamesdic['oiv1'] = 
['OIV $\\\\lambda$1397' , 1397.232 , 'right' , 'Morton1991tab5']\n linenamesdic['oiv2'] = ['OIV $\\\\lambda$1400' , 1399.780 , 'left' , 'Morton1991tab5']\n linenamesdic['Siiv2'] = ['SiIV $\\\\lambda$1403' , 1402.770 , 'left' , 'Morton1991tab5']\n linenamesdic['CIV1548'] = ['CIV $\\\\lambda$1548' , 1548.195 , 'right' , 'Morton1991tab5']\n linenamesdic['CIV1551'] = ['CIV $\\\\lambda$1551' , 1550.770 , 'left' , 'Morton1991tab5']\n linenamesdic['HeII1640'] = ['HeII $\\\\lambda$1640' , 1640.420 , 'right' , 'vandenberk+2001']\n linenamesdic['OIII1661'] = ['OIII] $\\\\lambda$1661' , 1660.809 , 'right' , 'Morton1991tab2']\n linenamesdic['OIII1666'] = ['OIII] $\\\\lambda$1666' , 1666.150 , 'left' , 'Morton1991tab2']\n linenamesdic['ciii1'] = ['[CIII] $\\\\lambda$1907' , 1907. , 'right' , 'stark+2015']\n linenamesdic['CIII1908'] = ['CIII] $\\\\lambda$1909' , 1909. , 'left' , 'stark+2015']\n linenamesdic['ciib'] = ['CII] $\\\\lambda$2326' , 2326.113 , 'right' , 'Morton1991tab5']\n linenamesdic['mgii1'] = ['MgII] $\\\\lambda$2796' , 2795.528 , 'right' , 'Morton1991tab5']\n linenamesdic['mgii2'] = ['MgII] $\\\\lambda$2803' , 2802.705 , 'left' , 'Morton1991tab5']\n linenamesdic['OII3727'] = ['[OII] $\\\\lambda$3726' , 3726. , 'right' , 'Pradhan2006']\n linenamesdic['oii2'] = ['[OII] $\\\\lambda$3729' , 3729. , 'left' , 'Pradhan2006']\n\n return linenamesdic", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this 
computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def _get_vars(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle': 'var b1=n.round(t,2);',\n 'square': 'var b1=n.round(t,2);',\n 'diamond': 'var b1=n.round(t*1.3,2);',\n 'hexagram': 'var b1=n.round(t,2);var b2=n.round(t/2,2);var b3=n.round(t*Math.sqrt(3)/2,2);'\n }[symbol]\n return {\n 37: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 38: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 39: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 40: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 34: 'var d1=n.round(t,2);',\n 33: 'var d1=n.round(t*1.4,2);',\n 35: 'var d1=n.round(t*1.2,2);var d2=n.round(t*0.85,2);',\n 36: 'var d1=n.round(t/2,2);var d2=n.round(t,2);'\n }[symbol]", "def validVarConstructName(self,varname):\r\n if (len(varname[0])>32):\r\n return False, ''\r\n if not(varname[0][0].isalpha()):\r\n return False, '' \r\n for ch in varname[0][1:]:\r\n if not(ch.isalpha() or ch.isdigit() or ch=='_'):\r\n return False, ''\r\n \r\n return True, varname", "def getCharmmParams(residue, name):\n resname = residue.get(\"name\")\n atomname = name\n\n # Nucleic Acid Substitutions\n\n if residue.get(\"type\") == 4:\n resname = resname[0]\n if resname == \"A\":\n resname = \"ADE\"\n elif resname == \"C\":\n resname = \"CYT\"\n elif resname == \"G\":\n resname = \"GUA\"\n elif resname == \"T\":\n resname = \"THY\"\n if atomname == \"C7\":\n atomname = \"C5M\"\n elif atomname == \"H71\":\n atomname = \"H51\"\n elif atomname == \"H72\":\n atomname = \"H52\"\n elif atomname == \"H73\":\n atomname = \"H53\"\n elif resname == \"U\":\n resname = \"URA\"\n\n if atomname == \"H5'1\":\n atomname = \"H5'\"\n elif atomname == \"H5'2\":\n atomname = \"H5''\"\n elif atomname == \"H2'1\":\n atomname = \"H2'\"\n elif atomname in [\"H2'2\", \"HO'2\"]:\n atomname = \"H2''\"\n\n if residue.getAtom(\"O2'\") is None:\n if atomname in [\"C2'\", \"H2'\", \"H2''\"]: resname = \"DEO1\"\n\n if residue.getAtom(\"H5T\") is not None:\n if atomname in [\"H5T\", \"O5'\", \"C5'\"]: resname = \"5TER\"\n if residue.getAtom(\"H3T\") is not None:\n if atomname in [\"H3T\", \"O3'\", \"C3'\"]: resname = \"3TER\"\n\n # Terminal/Water Substitutions\n\n if residue.get(\"isNterm\"):\n if resname == \"GLY\" and atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA2\", \"HA3\"]:\n resname = \"GLYP\"\n if atomname == \"H\":\n atomname = \"HT1\"\n elif atomname == \"H2\":\n atomname = \"HT2\"\n elif atomname == \"H3\":\n atomname = \"HT3\"\n elif resname == \"PRO\" and atomname in [\"N\", \"HN1\", \"HN2\", \"CD\", \"CA\", \"HD1\", \"HD2\", \"HA\", \"H2\", \"H3\"]:\n resname = \"PROP\"\n if atomname == \"H2\":\n atomname = \"HN1\"\n elif atomname == \"H3\":\n atomname = \"HN2\"\n elif resname == \"ACE\":\n if atomname == \"CH3\":\n atomname = \"CAY\"\n elif atomname == \"HH31\":\n atomname = \"HY1\"\n elif atomname == \"HH32\":\n atomname = \"HY2\"\n elif atomname == \"HH33\":\n atomname = \"HY3\"\n elif atomname == \"C\":\n atomname = \"CY\"\n elif atomname == \"O\":\n atomname = \"OY\"\n else:\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\"]:\n resname = \"NTER\"\n if atomname == \"H\":\n atomname = \"HT1\"\n elif atomname == 
\"H2\":\n atomname = \"HT2\"\n elif atomname == \"H3\":\n atomname = \"HT3\"\n elif residue.get(\"isCterm\"):\n if atomname in [\"O\", \"OXT\", \"C\"]:\n resname = \"CTER\"\n if atomname == \"O\":\n atomname = \"OT1\"\n elif atomname == \"OXT\":\n atomname = \"OT2\"\n elif residue.get(\"type\") == 3:\n resname = \"TP3M\"\n if atomname == \"O\": atomname = \"OH2\"\n\n # Residue substitutions\n\n if resname == \"ILE\":\n if atomname == \"CD1\":\n atomname = \"CD\"\n elif atomname == \"HD11\":\n atomname = \"HD1\"\n elif atomname == \"HD12\":\n atomname = \"HD2\"\n elif atomname == \"HD13\":\n atomname = \"HD3\"\n elif atomname == \"HG12\":\n atomname = \"HG11\"\n elif atomname == \"HG13\":\n atomname = \"HG12\"\n elif resname == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CYS\"\n if atomname == \"CB\":\n resname = \"DISU\"\n atomname = \"1CB\"\n elif atomname == \"SG\":\n resname = \"DISU\"\n atomname = \"1SG\"\n elif resname == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HSP\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HSD\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HSE\"\n elif resname == \"GLU\" or resname == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n if atomname in [\"CG\", \"HG3\", \"HG1\", \"HG2\", \"CD\", \"OE1\", \"OE2\", \"HE2\"]:\n resname = \"GLUP\"\n else:\n resname = \"GLU\"\n elif \"HE2\" in residue.get(\"map\"):\n if atomname in [\"CG\", \"HG3\", \"HG1\", \"HG2\", \"CD\", \"OE1\", \"OE2\", \"HE2\"]:\n resname = \"GLUP\"\n else:\n resname = \"GLU\"\n elif resname == \"ASP\" or resname == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n if atomname in [\"CB\", \"HB3\", \"HB1\", \"HB2\", \"CG\", \"OD1\", \"OD2\", \"HD2\"]:\n resname = \"ASPP\"\n else:\n resname = \"ASP\"\n elif \"HD2\" in residue.get(\"map\"):\n if atomname in [\"CB\", \"HB3\", \"HB1\", \"HB2\", \"CG\", \"OD1\", \"OD2\", \"HD2\"]:\n resname = \"ASPP\"\n else:\n resname = \"ASP\"\n\n # HETATM Substitutions\n\n if resname == \"ACE\":\n if atomname == \"CH3\":\n atomname = \"CAY\"\n elif atomname == \"HH31\":\n atomname = \"HY1\"\n elif atomname == \"HH32\":\n atomname = \"HY2\"\n elif atomname == \"HH33\":\n atomname = \"HY3\"\n elif atomname == \"C\":\n atomname = \"CY\"\n elif atomname == \"O\":\n atomname = \"OY\"\n elif resname == \"ADP\":\n atomname = atomname.replace(\"*\", \"\\'\")\n elif resname == \"NME\":\n resname = \"CT3\"\n if atomname == \"HH31\":\n atomname = \"HT1\"\n elif atomname == \"HH32\":\n atomname = \"HT2\"\n elif atomname == \"HH33\":\n atomname = \"HT3\"\n elif atomname == \"CH3\":\n atomname = \"CAT\"\n elif atomname == \"N\":\n atomname = \"NT\"\n elif atomname == \"H\":\n atomname = \"HNT\"\n\n # Hydrogen Substitutions\n\n if atomname == \"H\":\n atomname = \"HN\"\n elif atomname == \"HA2\":\n atomname = \"HA1\"\n elif atomname == \"HA3\":\n atomname = \"HA2\"\n elif atomname == \"HB2\" and resname not in [\"ALA\"]:\n atomname = \"HB1\"\n elif atomname == \"HB3\" and resname not in [\"ALA\"]:\n atomname = \"HB2\"\n elif atomname == \"HD2\" and resname not in [\"HSP\", \"HSE\", \"HSD\", \"ASPP\"]:\n atomname = \"HD1\"\n elif atomname == \"HD3\" and resname not in [\"HIS\", \"HSE\", \"HSD\"]:\n atomname = 
\"HD2\"\n elif atomname == \"HE2\" and resname not in [\"TRP\", \"HSP\", \"HSE\", \"HSD\", \"GLUP\"]:\n atomname = \"HE1\"\n elif atomname == \"HE3\" and resname not in [\"TRP\", \"HSP\", \"HSE\", \"HSD\"]:\n atomname = \"HE2\"\n elif atomname == \"HG2\":\n atomname = \"HG1\"\n elif atomname == \"HG3\":\n atomname = \"HG2\"\n elif atomname == \"HG\" and resname in [\"SER\", \"CYS\"]:\n atomname = \"HG1\"\n\n return resname, atomname", "def let():\n def from_many(*kv_pairs):\n new_bindings = {}\n for entry in kv_pairs:\n with match(entry) as case:\n with case('Quoted(Sexpr(Name(name), expr))') as [m]:\n new_bindings[m.name] = m.expr\n\n def _from_many(quoted_body):\n return EvaluateInContext(\n push_subscope_with(new_bindings),\n pop_subscope,\n quoted_body.subexpression\n )\n\n return e.Function({parse_fn(\"(λ &[any] . any)\"): _from_many})\n yield (\"(λ ...&[(name any)] . (λ &[any] . any))\", from_many)\n\n def from_one(key, value, quoted_body):\n return EvaluateInContext(\n push_subscope_with({key.subexpression.name: value}),\n pop_subscope,\n quoted_body.subexpression\n )\n yield (\"(λ &[name] any &[any] . any)\", from_one)", "def lookup(name):", "def lookup(name):", "def _parse_var_initsol(self,varname) :\n\t\tinitsol = self.ss.constraint.initsol\n\t\tparams = getattr(initsol,varname)\n\t\tnvars = len(self.ss.variables) # num of variables\n\n\t\tif varname in ('alpha','beta') : \n\t\t\tself.initsol[varname] = np.ones(nvars)\n\t\t\tkeys = params.keys()\n\t\t\tself.initsol[varname][:] = params['defaultInitialValue']\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+',key)\t:\n\t\t\t\t\tidx = int(key.split('_')[1])\n\t\t\t\t\tself.initsol[varname][idx-1] = params[key]\n\t\telif varname in ('g','h') :\n\t\t\tself.initsol[varname] = np.ones([nvars,nvars])\n\t\t\tkeys = params.keys()\n\t\t\tself.initsol[varname][:] = params['defaultInitialValue']\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+_\\d+',key)\t:\n\t\t\t\t\tidr,idc = map(int,(key.split('_')[1:3]))\n\t\t\t\t\tself.initsol[varname][idr-1][idc-1] = params[key]\n\t\t\n\t\telse :\n\t\t\tlogging.error(\"Unrecognized varname %s quitting..\" \\\n\t\t\t%(varname))\n\t\t\tsys.exit(1)", "def test_add_var_desc():\n v = dd.vars['WGT']\n \n assert add_var_desc('Housing ', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing'\n\n \"\"\" Test add second line \"\"\"\n assert add_var_desc(' Unit Weight', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing Unit Weight'\n\n \"\"\" Test prevention against duplication \"\"\"\n assert add_var_desc('Housing Unit Weight', dd, 'WGT') == 'WGT'\n assert add_var_desc('HousingUnit Weight', dd, 'WGT') == 'WGT'\n\n assert add_var_desc('Person', dd, 'PWGT') == None", "def prIn(nm, form=\"\", *args):\n # variables set in \"prSet()\"\n global lPr\n\n # init\n if not 'lPr' in globals():\n prSet(3)\n\n # print\n if form == \"\":\n pr('%s', nm)\n else:\n pr('%s: ' + form, nm, *args)\n\n # self add\n lPr = lPr + 1", "def test_get_call_name2(self):\n tree = ast.parse(\"a.b.c.d(x,y)\").body[0].value\n\n name = b_utils.get_call_name(tree, {\"a\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.b.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y.c.d\", name)\n\n name = b_utils.get_call_name(tree, {\"a.b.c.d\": \"alias.x.y\"})\n self.assertEqual(\"alias.x.y\", name)", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring 
selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def test_expand_var(self):\n self.assertEqual(\"test\",\n grammar._EXPAND_VAR.parseString(\"$test\").name)", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def standard_name_to_long_name(prop_dict, context=None):\n########################################################################\n # We assume that standar_name has been checked for validity\n # Make the first char uppercase and replace each underscore with a space\n if 'standard_name' in prop_dict:\n standard_name = prop_dict['standard_name']\n if len(standard_name) > 0:\n long_name = standard_name[0].upper() + re.sub(\"_\", \" \", standard_name[1:])\n else:\n long_name = ''\n # End if\n # Next, substitute a decimal point for the p in [:digit]p[:digit]\n match = real_subst_re.match(long_name)\n while match is not None:\n long_name = match.group(1) + '.' + match.group(2)\n match = real_subst_re.match(long_name)\n # End while\n else:\n long_name = ''\n if 'local_name' in prop_dict:\n lname = ' {}'.format(prop_dict['local_name'])\n else:\n lname = ''\n # End if\n ctxt = context_string(context)\n raise CCPPError('No standard name to convert{} to long name{}'.format(lname, ctxt))\n # End if\n return long_name", "def _linab(arg, symbol):\n arg = arg.expand()\n ind, dep = arg.as_independent(symbol)\n if not arg.is_Add:\n b = 0\n a, x = ind, dep\n else:\n b = ind\n a, x = separatevars(dep).as_independent(symbol, as_Add=False)\n if x.could_extract_minus_sign():\n a = -a\n x = -x\n return a, b, x", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). 
e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def r_4(comps: 'list(Compound)', is_input):\r\n react: str\r\n\r\n if is_input:\r\n iSiMe = Compound(\"Al\")\r\n if len(comps) == 1:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n if \"iSi\" in comps[0].comp_type:\r\n iSiMe = comps[0]\r\n else:\r\n iSiMe = comps[1]\r\n\r\n me = list(iSiMe.formula.consist.keys())[0].name\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n me_oxs = get_me_oxs(me)\r\n if me_oxs == 0:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSaNo = Compound(iSaNo_create(me, me_oxs, \"NO3\", 1))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2 + H2O\"\r\n else:\r\n iSaNo = Compound(\"Al(NO3)3\")\r\n if len(comps) == 1:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif len(comps) == 2:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n if \"iSaNo\" in comps[0].comp_type:\r\n iSaNo = comps[0]\r\n elif \"iSaNo\" in comps[1].comp_type:\r\n iSaNo = comps[1]\r\n else:\r\n iSaNo = comps[2]\r\n\r\n ((me, me_oxs), (an, an_oxs)) = iSa_oxs(iSaNo.formula)\r\n if an != \"NO3\":\r\n return \"\"\r\n if me in [\"Au\", \"Pt\"]:\r\n return \"\"\r\n if is_me_activer(\"H\", 1, me, me_oxs):\r\n return \"\"\r\n\r\n iSiMe = Compound(simple(me))\r\n\r\n react = f\"{iSiMe} + HNO3 -> {iSaNo} + N2 + H2O\"\r\n\r\n return Reaction(react)", "def _build_sub(self) -> str:\n return dedent(\n \"\"\"\n @SP\n M=M-1\n A=M\n D=M\n @SP\n M=M-1\n A=M\n M=M-D\n @SP\n M=M+1\n \"\"\"\n )", "def demo():\n # Create some nonterminals\n S, NP, VP, PP = nonterminals('S, NP, VP, PP')\n N, V, P, Det = nonterminals('N, V, P, Det')\n VP_slash_NP = VP/NP\n\n print 'Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP]\n print ' S.symbol() =>', `S.symbol()`\n print\n\n # Create some CFG Productions\n prods = [CFGProduction(S, [NP, VP]), CFGProduction(PP, [P, NP]),\n CFGProduction(NP, [Det, N]), CFGProduction(NP, [NP, PP]),\n CFGProduction(VP, [V, NP]), CFGProduction(VP, [VP, PP]),\n CFGProduction(Det, ['a']), CFGProduction(Det, ['the']),\n CFGProduction(N, ['dog']), CFGProduction(N, ['cat']), \n CFGProduction(V, ['chased']), CFGProduction(V, ['sat']),\n CFGProduction(P, ['on']), CFGProduction(P, ['in'])]\n\n prod = prods[2]\n print 'A CFG production:', `prod`\n print ' prod.lhs() =>', `prod.lhs()`\n print ' prod.rhs() =>', `prod.rhs()`\n print\n\n # Create and print a CFG\n cfg = CFG(S, prods)\n print 'A CFG grammar:', `cfg`\n print ' cfg.start() =>', `cfg.start()`\n print ' cfg.productions() =>',\n # Use string.replace(...) 
is to line-wrap the output.\n print `cfg.productions()`.replace(',', ',\\n'+' '*25)\n print\n\n # Create some probabilistic CFG Productions\n A, B, C = nonterminals('A, B, C')\n pcfg_prods = [PCFGProduction(A, [B, B], prob=0.3),\n PCFGProduction(A, [C, B, C], prob=0.7),\n PCFGProduction(B, [B, 'b'], prob=0.5),\n PCFGProduction(B, [C], prob=0.5),\n PCFGProduction(C, ['a'], prob=0.1),\n PCFGProduction(C, ['b'], prob=0.9)] \n \n pcfg_prod = pcfg_prods[2]\n print 'A PCFG production:', `pcfg_prod`\n print ' pcfg_prod.lhs() =>', `pcfg_prod.lhs()`\n print ' pcfg_prod.rhs() =>', `pcfg_prod.rhs()`\n print ' pcfg_prod.prob() =>', `pcfg_prod.prob()`\n print\n\n # Create and print a PCFG\n pcfg = PCFG(S, pcfg_prods)\n print 'A PCFG grammar:', `pcfg`\n print ' pcfg.start() =>', `pcfg.start()`\n print ' pcfg.productions() =>',\n # Use string.replace(...) is to line-wrap the output.\n print `pcfg.productions()`.replace(',', ',\\n'+' '*26)\n print", "def variable_string(self, name):\n return \"$(\" + name + \")\"", "def examples():\r\n\r\n # get some data for a single name\r\n x = blp.bdp('BDEV LN Equity', 'px_last')\r\n print(x)\r\n print('the type of x', type(x))\r\n print('the value of x:', x.iloc[0]['px_last'])\r\n\r\n\r\n # get multiple data for a single name\r\n y = blp.bdp('BDEV LN Equity', flds=['px_bid', 'px_ask'])\r\n print(y)\r\n\r\n\r\n # get multiple data for multiple names\r\n z = blp.bdp(tickers=['BDEV LN Equity', 'BARC LN Equity'], flds=['px_bid', 'px_ask'])\r\n print(z)\r\n print('here is the bdev ask >>>', z.loc['BDEV LN Equity','px_ask'])", "def part(expr,address):\n for num in address:\n expr = expr.args[num]\n return expr", "def test_variablepresentations_get(self):\n pass", "def test():\n assert str(Polynomial(0, 1, 0, -1, 4, -2, 0, 1, 3, 0)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial([-5, 1, 0, -1, 4, -2, 0, 1, 3, 0])) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x - 5\"\n assert str(Polynomial(x7=1, x4=4, x8=3, x9=0, x0=0, x5=-2, x3=-1, x1=1)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial(x2=0)) == \"0\"\n assert str(Polynomial(x0=0)) == \"0\"\n assert Polynomial(x0=2, x1=0, x3=0, x2=3) == Polynomial(2, 0, 3)\n assert Polynomial(x2=0) == Polynomial(x0=0)\n assert str(Polynomial(x0=1) + Polynomial(x1=1)) == \"x + 1\"\n assert str(Polynomial([-1, 1, 1, 0]) + Polynomial(1, -1, 1)) == \"2x^2\"\n pol1 = Polynomial(x2=3, x0=1)\n pol2 = Polynomial(x1=1, x3=0)\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 1) == \"x - 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 2) == \"x^2 - 2x + 1\"\n pol3 = Polynomial(x0=-1, x1=1)\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(Polynomial(x0=2).derivative()) == \"0\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative()) == \"6x^2 + 3\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative().derivative()) == \"12x\"\n pol4 = Polynomial(x3=2, x1=3, x0=2)\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert Polynomial(-2, 3, 4, -5).at_value(0) == -2\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3) == 20\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3, 5) == 44\n pol5 = Polynomial([1, 0, -2])\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-1, 3.6) == -23.92\n assert pol5.at_value(-1, 3.6) == -23.92", "def vars_formula ( self , formula , vars , name 
= '' , title = '' ) :\n\n assert vars and len ( vars ) , 'Variables must be specified!'\n\n vvars = []\n for v in vars :\n if isinstance ( v , ROOT.RooAbsArg ) :\n vvars.append ( v )\n elif isinstance ( v , string_types ) :\n try :\n vv = self.parameter ( v )\n vvars.append ( vv ) \n except :\n raise TypeError ( \"Unknown parameter name %s\" % v)\n else :\n raise TypeError( \"Unknown parameter type %s/%s\" % ( v , type ( v ) ) ) \n\n vlst = ROOT.RooArgList()\n for v in vvars : vlst.add ( v )\n\n has_at = '@' in formula\n has_percent = '%' in formula\n import re\n has_index = re.search ( r'\\[( *)(?P<degree>\\d*)( *)\\]' , formula )\n has_format1 = re.search ( r'\\{( *)(?P<degree>\\d*)( *)\\}' , formula )\n has_format2 = re.search ( r'\\{( *)(?P<degree>\\w*)( *)\\}' , formula )\n\n formula_ = formula \n if has_at : pass \n elif has_index : pass \n elif has_percent : \n vnames = tuple ( [ p.name for p in vlst ] )\n formula_ = formula % vnames\n elif has_format1 : \n vnames = tuple ( [ p.name for p in vlst ] )\n formula_ = formula.format ( *vnames ) \n elif has_format2 :\n kw = {}\n for p in vlist : kw [ p.name ] = p.name\n formula_ = formula.format ( *kw )\n \n name = name if name else 'Formula_%s ' % self.name \n title = title if title else 'Formula:%s/%s' % ( formula , self.name )\n \n rfv = ROOT.RooFormulaVar ( self.var_name ( name ) , title , formula_ , vlst )\n \n self.aux_keep.append ( vlst )\n self.aux_keep.append ( rvf )\n \n return rfv", "def enrich_varname(varname):\r\n greek = (\"alpha beta gamma delta epsilon varepsilon zeta eta theta \"\r\n \"vartheta iota kappa lambda mu nu xi pi rho sigma tau upsilon \"\r\n \"phi varphi chi psi omega\").split()\r\n\r\n # add capital greek letters\r\n greek += [x.capitalize() for x in greek]\r\n\r\n # add hbar for QM\r\n greek.append('hbar')\r\n\r\n # add infinity\r\n greek.append('infty')\r\n\r\n if varname in greek:\r\n return ur\"\\{letter}\".format(letter=varname)\r\n else:\r\n return varname.replace(\"_\", r\"\\_\")", "def get_basis(n):\n\treturn ' '.join('e{}'.format(i) for i in range(n))", "def solution(s):", "def pulp_smash():", "def Get_LonghurstProvinceName4Num(input):\n LonghurstProvinceDict = {\n 'ALSK': 'AlaskaDownwellingCoastalProvince',\n 'ANTA': 'AntarcticProvince',\n 'APLR': 'AustralPolarProvince',\n 'ARAB': 'NWArabianUpwellingProvince',\n 'ARCH': 'ArchipelagicDeepBasinsProvince',\n 'ARCT': 'AtlanticArcticProvince',\n 'AUSE': 'EastAustralianCoastalProvince',\n 'AUSW': 'AustraliaIndonesiaCoastalProvince',\n 'BENG': 'BenguelaCurrentCoastalProvince',\n 'BERS': 'N.PacificEpicontinentalProvince',\n 'BPLR': 'BorealPolarProvince(POLR)',\n 'BRAZ': 'BrazilCurrentCoastalProvince',\n 'CAMR': 'CentralAmericanCoastalProvince',\n 'CARB': 'CaribbeanProvince',\n 'CCAL': 'CaliforniaUpwellingCoastalProvince',\n 'CHIL': 'ChilePeruCurrentCoastalProvince',\n 'CHIN': 'ChinaSeaCoastalProvince',\n 'CHSB': 'CheasapeakeBayProvince',\n 'CNRY': 'CanaryCoastalProvince(EACB)',\n 'EAFR': 'E.AfricaCoastalProvince',\n 'ETRA': 'EasternTropicalAtlanticProvince',\n 'FKLD': 'SWAtlanticShelvesProvince',\n 'GFST': 'GulfStreamProvince',\n 'GUIA': 'GuianasCoastalProvince',\n 'GUIN': 'GuineaCurrentCoastalProvince',\n 'INDE': 'E.IndiaCoastalProvince',\n 'INDW': 'W.IndiaCoastalProvince',\n 'ISSG': 'IndianS.SubtropicalGyreProvince',\n 'KURO': 'KuroshioCurrentProvince',\n 'LAKE': 'CaspianSea,AralSea',\n 'MEDI': 'MediterraneanSea,BlackSeaProvince',\n 'MONS': 'IndianMonsoonGyresProvince',\n 'NADR': 'N.AtlanticDriftProvince(WWDR)',\n 'NASE': 
'N.AtlanticSubtropicalGyralProvince(East)(STGE)',\n 'NASW': 'N.AtlanticSubtropicalGyralProvince(West)(STGW)',\n 'NATR': 'N.AtlanticTropicalGyralProvince(TRPG)',\n 'NECS': 'NEAtlanticShelvesProvince',\n 'NEWZ': 'NewZealandCoastalProvince',\n 'NPPF': 'N.PacificPolarFrontProvince',\n 'NPSE': 'N.PacificSubtropicalGyreProvince(East)',\n 'NPSW': 'N.PacificSubtropicalGyreProvince(West)',\n 'NPTG': 'N.PacificTropicalGyreProvince',\n 'NWCS': 'NWAtlanticShelvesProvince',\n 'OCAL': 'OffshoreCaliforniaCurrentProvince',\n 'PEQD': 'PacificEquatorialDivergenceProvince',\n 'PNEC': 'N.PacificEquatorialCountercurrentProvince',\n 'PSAE': 'PacificSubarcticGyresProvince(East)',\n 'PSAW': 'PacificSubarcticGyresProvince(West)',\n 'REDS': 'RedSea,PersianGulfProvince',\n 'SANT': 'SubantarcticProvince',\n 'SARC': 'AtlanticSubarcticProvince',\n 'SATL': 'SouthAtlanticGyralProvince(SATG)',\n 'SPSG': 'S.PacificSubtropicalGyreProvince',\n 'SSTC': 'S.SubtropicalConvergenceProvince',\n 'SUND': 'SundaArafuraShelvesProvince',\n 'TASM': 'TasmanSeaProvince',\n 'WARM': 'W.PacificWarmPoolProvince',\n 'WTRA': 'WesternTropicalAtlanticProvince'\n }\n return LonghurstProvinceDict[input]", "def change_variables((a,b,c,d), (n,r,m)): \n return ( n*a**2 + r*a*b + m*b**2, 2*(n*a*c + m*b*d) + r*(a*d + c*b), \\\n n*c**2 + r*c*d + m*d**2 )", "def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))", "def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)", "def get_strains(names, q_dof):\n strain_functions = []\n for n, this_dof in zip(names, q_dof):\n check_qdof(n, this_dof)\n if n == 'linear_helix':\n strain_functions.append(linear_helix_strain)\n elif n == 'pure_helix':\n strain_functions.append(pure_helix_strain)\n elif 
n == 'torsion_helix':\n strain_functions.append(torsion_helix_strain)\n elif n == 'torsion_linear_helix':\n strain_functions.append(torsion_linear_helix_strain)\n elif n == 'quadratic':\n strain_functions.append(quadratic_strain)\n elif n == 'linear':\n strain_functions.append(linear_strain)\n elif n == 'constant':\n strain_functions.append(constant_strain)\n elif n == 'full':\n strain_functions.append(full_strain)\n else:\n print(f'{n} is not a defined strain base.')\n return strain_functions", "def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full", "def __init__(self,name):\n self._name = name\n self._supplies = []\n self.generateSupplies()", "def create_extended_name(y: str, p: str) -> str:\n final_letter = y[-1]\n if final_letter == \"e\":\n extended_name = y + \"x\" + p\n elif final_letter in [\"a\", \"i\", \"o\", \"u\"]:\n extended_name = y[:-1] + \"ex\" + p\n elif final_letter == \"x\":\n if y[-2] == \"e\":\n extended_name = y + p\n else:\n extended_name = y + \"ex\" + p\n return extended_name", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def _make_ht_label(chain_parts):\n\n assert len(chain_parts) == 1, '_make_ht_label, no. 
of chain parts != 1'\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('HT'), '_make_ht_label(): scenario does not start with HT'\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>ht)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'ht': ('0', 'inf'),\n 'et': ('0', 'inf'),\n 'eta': ('0', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n nargs = len(args)\n assert len(args) <= len(arg_res), 'bad num of args %d, expected < %d' % (len(args),\n len(arg_res))\n\n # obtain argument values frrom scenario\n while args:\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = float(defaults[key][0])\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = float(defaults[key][1])\n argvals[key+'hi'] = hi\n\n print (argvals)\n assert len(argvals) == 2*nargs, 'no of args: %d, expected %d' % (len(argvals), 2*nargs)\n\n print ('sent 100')\n result = \"\"\"\n ht([(%(htlo).0fht) \n (%(etlo).0fet)\n (%(etalo).0feta%(etahi).0f)\n ])\"\"\" % argvals\n print (result)\n return result", "def __init__(self):\n\n # names of atoms that make up relevant segements of each chain\n self.chains = {'a': {'C': 'C1', 'C1': 'C2', 'C2': 'C3', 'C3': 'C4', 'C4': 'C5', 'H': 'H1', 'H1': 'H2',\n 'H2': 'H3', 'H3': 'H4', 'H4': 'H5'},\n 'b': {'C45': 'C1', 'C44': 'C2', 'C43': 'C3', 'C42': 'C4', 'C41': 'C5', 'H81': 'H1', 'H80': 'H2',\n 'H79': 'H3', 'H78': 'H4', 'H77': 'H5'}\n }\n\n self.nchains = len(list(self.chains.keys()))\n\n self.chain_numbers = {'a': 0, 'b': 1} # used to number chains\n\n # self.initial_types = {'C1': 'c2', 'C2': 'ce', 'C3': 'ce', 'C4': 'c2', 'H1': 'ha', 'H2': 'ha', 'H3': 'ha',\n # 'H4': 'ha', 'H5': 'ha'}\n\n # all indices numbered from 0. D1, D2, ... correspond to dummies attached to C1, C2, ... respectively\n self.indices = {'a': {'C1': 0, 'C2': 1, 'C3': 2, 'C4': 3, 'C5': 4, 'H1': 52, 'H2': 53, 'H3': 54, 'H4': 55,\n 'H5': 56, 'D1': 136, 'D2': 137, 'D3': 138, 'D4': 139},\n 'b': {'C1': 49, 'C2': 48, 'C3': 47, 'C4': 46, 'C5': 45, 'H1': 133, 'H2': 132, 'H3': 131,\n 'H4': 130, 'H5': 129, 'D1': 140, 'D2': 141, 'D3': 142, 'D4': 143}\n }\n\n self.dummy_connectivity = {'a': {'C': 'D1', 'C1': 'D2', 'C2': 'D3', 'C3': 'D4'},\n 'b': {'C45': 'D1', 'C44': 'D2', 'C43': 'D3', 'C42': 'D4'}}\n\n self.hydrogen_connectivity = {'C': ['H1', 'H2'], 'C1': ['H3'], 'C2': ['H4'], 'C3': ['H5'],\n 'C45': ['H1', 'H2'], 'C44': ['H3'], 'C43': ['H4'], 'C42': ['H5']}\n\n self.dummy_mass = 1.008 # mass of hydrogen\n\n # write these in order of priority\n # for efficiency, don't repeat things. For example self.carbons['C1']: self.carbons['C2'] is the same as\n # self.carbons['C2']: self.carbons['C1']. 
Otherwise, computational expense goes up and a new reaction has\n # to be defined below.\n self.carbons = {'C1': ['C', 'C45'], 'C2': ['C1', 'C44'], 'C3': ['C2', 'C43'], 'C4': ['C3', 'C42']}\n self.bonds_with = [[self.carbons['C1'], self.carbons['C2']]]\n\n # define which improper dihedrals to remove -- written in same order as .itp file!!!\n # note that the order of the atoms may be different for each chain\n # NOTE: C3 not tested\n self.impropers = {'a': {'C1': ['H2', 'C1', 'H1', 'C2'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']},\n 'b': {'C1': ['C2', 'H2', 'C1', 'H1'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']}}", "def construct_name(p, prefix):\n name = prefix\n for key in p.keys():\n if (type(p[key]) != tuple) and (type(p[key]) != list):\n name = name + '_' + str(key) + '-' + str(p[key])\n else:\n name = name + '_' + str(key) + '-' + str(p[key][0])\n return name", "def roo_name ( prefix = 'roo_' , suffix = '' ) :\n regname = ROOT.RooNameReg.instance()\n name = prefix + suffix\n MakeVar.__numnames += 1 \n while name in MakeVar.__pdf_names or name in MakeVar.__var_names or regname.known ( name ) or not name :\n name = prefix + ''.join ( ( random.choice ( ascii_letters ) for i in range ( 6 ) ) ) + suffix \n MakeVar.__numnames += 1 \n return name", "def genPrefixAntString(self,estimatedVar,prefix=\"_\"):\n self.prefixAntString = self.antString\n for name in estimatedVar:\n self.prefixAntString = replaceVariable(self.prefixAntString,\n name,prefix+name)", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine 
gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, 
a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, 
k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def expand_abbrevs(name):\n key = name.upper()\n for abbrev, word in ABBREVS.iteritems():\n key = re.sub(abbrev, word, key)\n \n #Remove (.*) from the street name\n key = re.sub(r'\\(.*?(:?\\)|$)', '', key)\n \n #Unify names\n key = NUMBER_IN_NAMES_REGEX.sub(lambda i: i.group(1) + \" \", key)\n key = re.sub(u\"Ё\", u\"Е\", key)\n key = re.sub(u\"[\\\"'«»№]\", u\" \", key)\n\n # remove \"им\" prefix\n key = re.sub(ur'[^\\s]ИМ[\\.\\s]+', u' ', key)\n\n #Change name parts order\n words = key.split(r\" \")\n words.sort()\n key = \" \".join(words)\n\n key = re.sub(u\"\\s+\", u\" \", key).strip()\n\n logging.debug(\"Street name %s was converted to %s\" % (name, key))\n \n return key", "def dot_name(number):\n\tif number > 0:\n\t\treturn \"P {}\".format(number)\n\telse:\n\t\treturn \"O {}\".format(-number)", "def main():\n\n rules, evolutions = [int(i) for i in input().strip().split()]\n\n 
rule = {}\n for _ in range(rules):\n start, finish = input().strip().split(' -> ')\n rule[start] = finish\n\n print(lindenmayor(rule, evolutions, input().strip()))", "def gen_name():\n return choice(globals()[choice(['oc_males', 'oc_females'])]) + ' ' + choice(na_surnames)", "def get_variables():\n policer_data = {\n \"policer_data\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-mark-dscp\",\n \"dscp\": \"AF22\"\n },\n \"violate-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n },\n \"color-aware\": True\n },\n \"policer_data_oper_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-mark-dscp\",\n },\n \"violate-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n },\n \"color-aware\": True\n },\n\n \"acl_tables\": {\n # settings for policer tables\n \"hc_acl_table\": {\n \"name\": \"table0\",\n \"nbuckets\": 2,\n \"memory_size\": 1048576,\n \"skip_n_vectors\": 12,\n \"miss_next\": \"permit\",\n \"mask\": \"00:00:00:00:00:00:00:00:00:00:00:00:ff:ff:ff:ff\"\n },\n # setting for acl sessions\n \"hc_acl_session\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:01\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n \"hc_acl_session2\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:02\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n },\n }\n return policer_data", "def test_expand_refname():\n\n if 'lref' in os.environ: del os.environ['lref']\n assert expand_refname('lref$something_tds.fits') == 'something_tds.fits', \\\n \"Without lref, didn't return None\"\n\n os.environ['lref'] = '/grp/hst/cdbs/lref/'\n full_refname = 
os.path.join( os.environ['lref'], 'something_tds.fits')\n assert expand_refname( 'lref$something_tds.fits' ) == full_refname, \\\n \"With lref, didn't find the file\"\n\n assert expand_refname( '' ) == '', \"didn't return blank on blank\"", "def main():\n\n precomp = {}\n for op1 in '+-*/':\n for op3 in '+-*/':\n for op5 in '+-*/':\n text = '4 ' + ' 4 '.join([op1, op3, op5]) + ' 4'\n precomp[eval2(text)] = text\n\n for _ in range(int(input())):\n number = int(input())\n if number in precomp:\n print(precomp[number], '=', number)\n else:\n print('no solution')" ]
[ "0.5789567", "0.5612758", "0.56002617", "0.5582453", "0.5527549", "0.5454671", "0.5450963", "0.5440792", "0.5437476", "0.54072136", "0.5388782", "0.53822136", "0.53622717", "0.5361482", "0.53562933", "0.5325314", "0.5280684", "0.52761763", "0.5207379", "0.5178423", "0.5170255", "0.5102485", "0.5090245", "0.50771946", "0.5020704", "0.50157154", "0.50134194", "0.50103205", "0.5000996", "0.4957311", "0.49512044", "0.49428305", "0.49393606", "0.49388877", "0.49040487", "0.48884833", "0.48830682", "0.48788056", "0.48532405", "0.48525697", "0.48352826", "0.4825802", "0.48069966", "0.48047987", "0.47963157", "0.47919402", "0.47850004", "0.47734058", "0.47691607", "0.47669044", "0.47532", "0.47264925", "0.47189394", "0.47174117", "0.47174117", "0.4716557", "0.47078517", "0.46938398", "0.46934572", "0.46931386", "0.4683028", "0.4680845", "0.46772757", "0.4661052", "0.46560183", "0.46541843", "0.46496028", "0.46492556", "0.46473423", "0.46430612", "0.4642397", "0.46387324", "0.46321943", "0.4626283", "0.4625919", "0.4621162", "0.46201724", "0.46131116", "0.46078354", "0.46051222", "0.46030036", "0.4602352", "0.4601534", "0.45941603", "0.45912838", "0.45788938", "0.4577035", "0.4577035", "0.45765793", "0.4571976", "0.45677206", "0.45626277", "0.45523113", "0.4551816", "0.45500955", "0.4547831", "0.45466644", "0.45434204", "0.4543109", "0.45427343", "0.4541705" ]
0.0
-1
Evaluate the given python code
async def evaluate(ctx, *, command): if match := re.fullmatch(r"(?:\n*)?`(?:``(?:py(?:thon)?\n)?((?:.|\n)*)``|(.*))`", command, re.DOTALL): code = match.group(1) if match.group(1) else match.group(2) str_obj = io.StringIO() # Retrieves a stream of data try: with contextlib.redirect_stdout(str_obj): exec(code) except Exception as e: return await ctx.send(f"""❌ Your code completed with execution code 1 ``` {e.__class__.__name__}: {e} ```""") return await ctx.send(f"""✅ Your code completed with execution code 0 ``` {str_obj.getvalue()} ```""") embed = discord.Embed(description="Error: Invalid format", color=0xED2525) return await ctx.send(embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluateCode(lang, code):", "def process_python(data, code):\n\tx=data\n\treturn eval(code)", "async def evaluate(self, ctx, *, code):\n # [p]evaluate <code>\n\n code = code.strip('` ')\n python = '```py\\n{}\\n```'\n result = None\n\n global_vars = globals().copy()\n global_vars['bot'] = self.bot\n global_vars['ctx'] = ctx\n global_vars['message'] = ctx.message\n global_vars['author'] = ctx.message.author\n global_vars['channel'] = ctx.message.channel\n global_vars['server'] = ctx.message.server\n\n try:\n result = eval(code, global_vars, locals())\n except Exception as e:\n await self.bot.say(python.format(type(e).__name__ + ': ' + str(e)))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n result = python.format(result)\n if not ctx.message.channel.is_private:\n censor = CacheAPI.get(key='dwarf_token')\n r = \"[EXPUNGED]\"\n for w in censor:\n if w != \"\":\n result = result.replace(w, r)\n result = result.replace(w.lower(), r)\n result = result.replace(w.upper(), r)\n await self.bot.say(result)", "def evaluate(compiled_expression):", "def evaluate(self, source):\n return self.interpreter.evaluate(source)", "def code():", "def evaluateValue(compiled_expression):", "def evaluate(self, line):\n locals = self.curframe().f_locals\n globals = self.curframe().f_globals\n try:\n code = compile(line + '\\n', '<stdin>', 'single')\n exec(code, globals, locals)\n except Exception:\n import sys\n t, v = sys.exc_info()[:2]\n if isinstance(t, type('')):\n exc_type_name = t\n else:\n exc_type_name = t.__name__\n print('*** {}: {}'.format(exc_type_name, v))", "def eval(*args, **kwargs)->Any:\n pass", "def eval_python(expression, _globals, _locals=None):\n return eval(expression, _globals, _locals)", "def Exec_Python(code):\n # pylint: disable=exec-used\n try:\n exec(code, globals())\n # pylint: disable=broad-except\n # pylint: disable=bare-except\n except:\n _LOGGER.error('Execution of following code has failed %s', code)\n return False\n return True", "def eval(*args, **kwargs):\n\n pass", "async def _eval(self, ctx, *, code):\r\n env = {\r\n 'self': self,\r\n 'bot': self.bot,\r\n 'ctx': ctx,\r\n 'message': ctx.message,\r\n 'guild': ctx.guild,\r\n 'channel': ctx.channel,\r\n 'author': ctx.author,\r\n 'me': ctx.me,\r\n 'that': self.last_result\r\n }\r\n env.update(globals())\r\n\r\n stdout = io.StringIO()\r\n\r\n toCompile = f'async def func():\\n{textwrap.indent(code, \" \")}'\r\n\r\n try:\r\n exec(toCompile, env)\r\n except Exception as e:\r\n em = discord.Embed(description=f\"Excecuted and errored: {e.__class__.__name__}: {e}\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(\r\n url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n return await ctx.send(embed=em)\r\n\r\n func = env['func']\r\n try:\r\n with redirect_stdout(stdout):\r\n ret = await func()\r\n except Exception as e:\r\n value = stdout.getvalue()\r\n em = discord.Embed(description=f\"Excecuted and errored: ```py\\n{value}{traceback.format_exc()}```\",\r\n color=0xff0000)\r\n em.set_author(name=\"Evaluated and errored\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n 
em.set_thumbnail(\r\n url='https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Red_x.svg/480px-Red_x.svg.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)\r\n else:\r\n value = stdout.getvalue()\r\n if ret is None or type(ret) is discord.Message:\r\n if value:\r\n x = f\"{value}\"\r\n self.last_result = value\r\n else:\r\n x = \"Executed successfully with no objects returned.\"\r\n else:\r\n x = f\"Executed successfully and returned: {value}{ret}\"\r\n self.last_result = f\"{value}{ret}\"\r\n em = discord.Embed(description=x, color=0x00ff00)\r\n em.set_author(name=\"Evaluated with success\",\r\n icon_url=ctx.message.author.avatar_url.replace(\"?size=1024\", \"\"))\r\n em.set_footer(text=\"Executed by: \" + str(ctx.message.author))\r\n em.set_thumbnail(url='http://www.iconsdb.com/icons/preview/green/checked-checkbox-xxl.png')\r\n em.add_field(name=\"Code\", value=f\"[See here.]({hastebin.post(code.encode('utf-8'))}.py)\")\r\n await ctx.send(embed=em)", "def evaluateText(compiled_expression):", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self, code: str, **data):\n self.actions.append({'type': 'eval', 'code': code, 'data': data})", "def program_eval(node):\n\n table = Memory(node.main_vars)\n return block_eval(node.main.block, table)", "def visit_Python(self, node):\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bp_code = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bp_code.code[1:-2])", "def visit_Python(self, node):\n # This compiles the given Python ast into a Python code object\n # then disassembles it into a byteplay code object. This allows\n # us to interleave the instructions with those generated for\n # the rest of the module and then compile a single unified \n # code object.\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bpc = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bpc.code[1:-2])", "def eval(expr):\n global simulator\n\n if simulator is None:\n print \"program is not running\"\n return\n return simulator.eval (expr)", "def py_exec(self, code_string):\n if not isinstance(code_string, str):\n print('py_exec: Error, the code must be a string `{}`!'.format(code_string))\n return False\n\n try: ret = eval(code_string, self.global_vars, self.global_vars)\n except Exception, e:\n print('py_exec: Error execution code `{}`! Exception `{}`!'.format(code_string, e))\n ret = False\n\n return ret", "def test(self):\n self.eval()", "def eval(self, code):\n if self._ws is None:\n raise RuntimeError('App not connected')\n self._send_command('EVAL ' + code)", "def exec_code(code, db, write=True):\n evaler = Evaluator(db, write=write)\n glb = {}\n loc = ExecutionContext(evaler=evaler)\n exec(code, glb, loc)", "def _eval_python(loop, context, params=None, add_boilerplate=False, namespace=None):\n params = params # pylint\n \n # Are we actually doing this?\n if (not context.do_jit):\n return False\n\n # Emulating full VB programs in Python is difficult, so for now skip loops\n # that Execute() dynamic VB.\n full_code_vba = safe_str_convert(loop).replace(\"\\n\", \"\\\\n\")\n code_vba = full_code_vba[:20]\n code_vba_lower = full_code_vba.lower()\n if (not context.throttle_logging):\n log.info(\"Starting JIT emulation of '\" + code_vba + \"...' 
...\")\n if ((\"Execute(\".lower() in code_vba_lower) or\n (\"ExecuteGlobal(\".lower() in code_vba_lower) or\n (\"Eval(\".lower() in code_vba_lower)):\n log.warning(\"Loop Execute()s dynamic code. Not JIT emulating.\")\n return False\n if (\".Item(\".lower() in code_vba_lower):\n log.warning(\"Loop references forms with .Item(). Not JIT emulating.\")\n return False\n \n # Generate the Python code for the VB code and execute the generated Python code.\n # TODO: Remove dangerous functions from what can be exec'ed.\n code_python = \"\"\n try:\n\n # For JIT handling we modify the values of certain variables to\n # handle recursive python code generation, so make a copy of the\n # original context.\n tmp_context = Context(context=context, _locals=context.locals, copy_globals=True)\n \n # Get the Python code for the loop.\n if (not context.throttle_logging):\n log.info(\"Generating Python JIT code...\")\n code_python = to_python(loop, tmp_context)\n if add_boilerplate:\n var_inits, _ = _loop_vars_to_python(loop, tmp_context, 0)\n func_defns = _called_funcs_to_python(loop, tmp_context, 0)\n code_python = _boilerplate_to_python(0) + \"\\n\" + \\\n func_defns + \"\\n\" + \\\n var_inits + \"\\n\" + \\\n code_python + \"\\n\" + \\\n _check_for_iocs(loop, tmp_context, 0) + \"\\n\" + \\\n _updated_vars_to_python(loop, tmp_context, 0)\n if (log.getEffectiveLevel() == logging.DEBUG):\n safe_print(\"JIT CODE!!\")\n safe_print(code_python)\n #print \"REMOVE THIS!!!\"\n #sys.exit(0)\n if (not context.throttle_logging):\n log.info(\"Done generating Python JIT code.\")\n\n # Extended ASCII strings are handled differently in VBScript and VBA.\n # Punt if we are emulating VBA and we have what appears to be extended ASCII\n # strings. For performance we are not handling the MS VBA extended ASCII in the python\n # JIT code.\n if (not context.is_vbscript):\n \n # Look for non-ASCII strings.\n non_ascii_pat = r'\"[^\"]*[\\x7f-\\xff][^\"]*\"'\n non_ascii_pat1 = r'\"[^\"]*(?:\\\\x7f|\\\\x[89a-f][0-9a-f])[^\"]*\"'\n if ((re.search(non_ascii_pat1, code_python) is not None) or\n (re.search(non_ascii_pat, code_python) is not None)):\n log.warning(\"VBA code contains Microsoft specific extended ASCII strings. Not JIT emulating.\")\n return False\n\n # Check for dynamic code execution in called functions.\n if (('\"Execute\", ' in code_python) or\n ('\"ExecuteGlobal\", ' in code_python) or\n ('\"Eval\", ' in code_python)):\n log.warning(\"Functions called by loop Execute() dynamic code. Not JIT emulating.\")\n return False\n \n # Run the Python code.\n \n # Have we already run this exact loop?\n if (code_python in jit_cache):\n var_updates = jit_cache[code_python]\n if (not context.throttle_logging):\n log.info(\"Using cached JIT loop results.\")\n if (var_updates == \"ERROR\"):\n log.error(\"Previous run of Python JIT loop emulation failed. Using fallback emulation for loop.\")\n return False\n\n # No cached results. Run the loop.\n elif (namespace is None):\n\n # JIT code execution goes not involve emulating VB GOTOs.\n context.goto_executed = False\n \n # Magic. For some reason exec'ing in locals() makes the dynamically generated\n # code recognize functions defined in the dynamic code. 
I don't know why.\n if (not context.throttle_logging):\n log.info(\"Evaluating Python JIT code...\")\n exec code_python in locals()\n else:\n\n # JIT code execution goes not involve emulating VB GOTOs.\n context.goto_executed = False\n\n # Run the JIT code in the given namespace.\n exec(code_python, namespace)\n var_updates = namespace[\"var_updates\"]\n if (not context.throttle_logging):\n log.info(\"Done JIT emulation of '\" + code_vba + \"...' .\")\n\n # Cache the loop results.\n jit_cache[code_python] = var_updates\n \n # Update the context with the variable values from the JIT code execution.\n try:\n for updated_var in var_updates.keys():\n if (updated_var == \"__shell_code__\"):\n continue\n context.set(updated_var, var_updates[updated_var])\n except (NameError, UnboundLocalError):\n log.warning(\"No variables set by Python JIT code.\")\n\n # Update shellcode bytes from the JIT emulation.\n import vba_context\n vba_context.shellcode = var_updates[\"__shell_code__\"]\n\n except NotImplementedError as e:\n log.error(\"Python JIT emulation of loop failed. \" + safe_str_convert(e) + \". Using fallback emulation method for loop...\")\n #safe_print(\"REMOVE THIS!!\")\n #raise e\n return False\n\n except Exception as e:\n\n # Cache the error.\n jit_cache[code_python] = \"ERROR\"\n \n # If we bombed out due to a potential infinite loop we\n # are done.\n if (\"Infinite Loop\" in safe_str_convert(e)):\n log.warning(\"Detected infinite loop. Terminating loop.\")\n return True\n\n # We had some other error. Emulating the loop in Python failed.\n log.error(\"Python JIT emulation of loop failed. \" + safe_str_convert(e) + \". Using fallback emulation method for loop...\")\n if (log.getEffectiveLevel() == logging.DEBUG):\n traceback.print_exc(file=sys.stdout)\n safe_print(\"-*-*-*-*-\\n\" + code_python + \"\\n-*-*-*-*-\")\n return False\n\n # Done.\n return True", "def run_python(request):\r\n if not request.user.is_staff:\r\n raise Http404\r\n c = {}\r\n c['code'] = ''\r\n c['results'] = None\r\n if request.method == 'POST':\r\n py_code = c['code'] = request.POST.get('code')\r\n g = {}\r\n try:\r\n safe_exec(py_code, g)\r\n except Exception as e:\r\n c['results'] = traceback.format_exc()\r\n else:\r\n c['results'] = pprint.pformat(g)\r\n return render_to_response(\"debug/run_python_form.html\", c)", "def do_execute_direct(self, code, silent=False):\n try:\n res = self._scalamagic.eval(code.strip(), raw=False)\n if res:\n return res\n except ScalaException as e:\n return self.Error(e.scala_message)", "def eval_python_blocks(req, body):\n localsdict = {\"request\": req}\n globalsdict = {}\n\n old_stdout = sys.stdout\n old_stderr = sys.stderr\n\n try:\n start = 0\n while body.find(\"<%\", start) != -1:\n start = body.find(\"<%\")\n end = body.find(\"%>\", start) \n\n if start != -1 and end != -1:\n codeblock = body[start+2:end].lstrip()\n\n sys.stdout = StringIO.StringIO()\n sys.stderr = StringIO.StringIO()\n\n try:\n exec codeblock in localsdict, globalsdict\n\n except Exception, e:\n print \"ERROR in processing: %s\" % e\n\n output = sys.stdout.getvalue() + sys.stderr.getvalue()\n body = body[:start] + output + body[end+2:]\n\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n\n return body", "def eval(self):\n raise NotImplementedError", "def eval_expr(code, local_dict: DICT, global_dict: DICT):\n expr = eval(\n code, global_dict, local_dict) # take local objects in preference\n return expr", "async def _eval(self, ctx, *, body: str):\n\n env = {\n 'bot': self.bot,\n 'ctx': ctx,\n 
'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n '_ret': self._last_result,\n 'conn': Tortoise.get_connection('default')\n }\n\n env.update(globals())\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = f'async def func():\\n{textwrap.indent(body, \" \")}'\n\n try:\n exec(to_compile, env)\n except Exception as e:\n return await ctx.send(f'```py\\n{e.__class__.__name__}: {e}\\n```')\n\n func = env['func']\n try:\n with contextlib.redirect_stdout(stdout):\n ret = await func()\n except Exception as e:\n value = stdout.getvalue()\n await ctx.send(f'```py\\n{value}{traceback.format_exc()}\\n```')\n else:\n value = stdout.getvalue()\n # try:\n # await ctx.message.add_reaction('😎')\n # except:\n # pass\n\n if ret is None:\n if value:\n await ctx.send(f'```py\\n{value}\\n```')\n else:\n self._last_result = ret\n await ctx.send(f'```py\\n{value}{ret}\\n```')", "def evaluateStructure(compiled_expression):", "def eval(self, *args, **kwargs):\n raise NotImplementedError", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def eval(self, line):\n self.eval(line)", "def evaluate(self) :\n pass", "def eval(self, expr, locals):\r\n sav = self.locals_ptr\r\n self.locals_ptr = locals\r\n x = eval(self.compile(expr), {\"__builtins__\":self.eval_allowed_globals}, locals)\r\n self.locals_ptr = sav\r\n return x", "def validate_Exec_Python(result, _dummy_code):\n return result", "def evaluate(self) -> int:", "def _safe_eval(expr, functions_and_constants={}, check_compiling_input=True):\n\n # Some safety checks\n assert len(expr) < 1024\n\n # Check for potential bad compiler input\n if check_compiling_input:\n check_for_pow(expr)\n\n # Compile Python source code to Python code for eval()\n code = compile(expr, '', 'eval')\n\n # Dissect bytecode back to Python opcodes\n ops = disassemble(code)\n assert len(ops) < 1024\n\n stack = []\n for op in ops:\n value = op.touch_value(stack, functions_and_constants)\n\n return value", "def code(self):\n if not self._code:\n filename = '<fluxtools function %s>' % self.tag\n self._code = compile(self.math, filename, mode='eval')\n return self._code", "async def eval_(ctx, *, code: str):\n code = code.strip(\"` \")\n message = ctx.message\n try:\n result = eval(code)\n if inspect.isawaitable(result):\n result = await result\n except Exception as e:\n await bot.say(\"```py\\nInput: {}\\n{}: {}```\".format(code, type(e).__name__, e))\n else:\n await bot.say(\"```py\\nInput: {}\\nOutput: {}\\n```\".format(code, result))\n await bot.delete_message(message)", "def evaluateMacro(compiled_expression):", "def run_code():\n\n output = None\n code = request.json['code']\n\n cmd = 'python -c \"' + code +'\"'\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, close_fds=True)\n output = p.stdout.read()\n\n return jsonify(output.decode('utf-8'))", "def eval(self):\n return self._eval_node(self.syntax_tree)", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def code(bot, msg, language, _, code):\n uri = 'https://eval.in/'\n data = {\n \"utf8\": \"\\xce\\xbb\",\n \"execute\": \"on\",\n \"private\": \"on\",\n \"lang\": supported_languages[language],\n \"input\": \"\",\n \"code\": util.flatten_incoming_text(bot, code).encode('utf-8'),\n }\n 
response = requests.post(uri, data)\n bot.debug(response.url)\n _, html = response.content.split(\"<h2>Program Output</h2>\", 1)\n html = html.lstrip()\n html = html[5: html.index(\"</pre>\")]\n output = util.unescape(html).rstrip().decode('utf-8')\n if output:\n try:\n bot.reply(u\"```{}```\".format(output))\n except exception.MessageTooLongException:\n bot.reply(response.url)\n else:\n bot.reply(\"No output...\")", "def eval(self) -> typing.Any:\n return self.expr()", "def runeval(self, expr, globals=None, locals=None):\n if globals is None:\n import __main__\n globals = __main__.__dict__\n if locals is None:\n locals = globals\n self.reset()\n sys.settrace(self.trace_dispatch)\n try:\n return eval(expr, globals, locals)\n except BdbQuit:\n pass\n finally:\n self.quitting = True\n sys.settrace(None)", "def evaluate(self, tree):\n\t\tpass", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def test_expr(self):\n x = t.Action(\"returnStuff()\")\n self.assertEqual(writePython(x),\n dd(\"\"\"\n _G_python_1, lastError = eval('returnStuff()', self.globals, _locals), None\n self.considerError(lastError, None)\n _G_python_1\n \"\"\"))", "def _eval(self, code, locals_dict=None, tsession=None):\n _logger.debug(\"_eval locals_dict: %s\" % locals_dict)\n t0 = time.time()\n locals_dict = self._update_locals_dict(locals_dict, tsession)\n globals_dict = self._get_globals_dict()\n if code:\n safe_eval(code, globals_dict, locals_dict, mode=\"exec\", nocopy=True)\n eval_time = time.time() - t0\n _logger.debug('Eval in %.2fs \\nlocals_dict:\\n%s\\nCode:\\n%s\\n', eval_time, locals_dict, code)\n return locals_dict", "def code_compile_and_run(code = '', gv = {}, lv = {}, return_keys = []):\n code_ = compile(code, \"<string>\", \"exec\")\n exec(code, gv, lv)\n # no keys given, return entire local variables dict\n if len(return_keys) < 1:\n return lv\n # single key given, return just the value of this entry\n elif len(return_keys) == 1:\n if return_keys[0] in lv:\n return lv[return_keys[0]]\n # several keys given, filter local variables dict by these keys and return\n else:\n return dict([(k, lv[k]) for k in return_keys if k in lv])", "def runEval(self, cmdText, varName, rhs):\n\n _globals= self._globals\n\n self.logPyCode= '# Error: ' + str(varName) + '=' + str(rhs)\n try:\n rhs= self.pyFromVec(rhs)\n except:\n print(\"Error: runEval at pyFromVec: \" + rhs)\n\n try:\n rhs= self.pyFromEng(rhs)\n except:\n print(\"Error: runEval at pyFromEng: \" + rhs)\n\n saved_handler = np.seterrcall(self.err_handler)\n save_err = np.seterr(divide='call', over='call', under='call',\n invalid='call')\n\n if varName:\n self.logPyCode= str(varName) + '=' + str(rhs)\n else:\n self.logPyCode= str(rhs)\n\n try:\n result= eval(str(rhs), _globals, _globals)\n _globals[str(varName)]= result\n success= True\n except:\n self.logCommandRHS= '# Error: ' + self.logPyCode\n result= 0\n success= False\n np.seterrcall(saved_handler)\n return result, success", "def eval(self, node):\n\n return None", "def eval_statement(self, 
line):\n if line[0] in self.env:\n self.env[line[0]](line[1::])\n elif line[1] == \"=\":\n self.assign_to_env(line)\n else:\n print(\"ERROR: Undefined function {}\".format(line[0]))\n quit()", "def ev(expr):\n return eval(expr,user_ns())", "def evaluate(expression: str) -> Any:\n # Compile the expression\n try:\n code = compile(expression, \"<string>\", \"eval\")\n except SyntaxError as e:\n print(f\"The given function is not syntactically correct! Error:\\n{e}\")\n return \"ERROR\"\n\n # Validate allowed names\n for name in code.co_names:\n if name not in Calculator.ALLOWED_NAMES:\n raise NameError(f\"The use of '{name}' is not allowed\")\n\n return eval(code, {\"__builtins__\": {}}, Calculator.ALLOWED_NAMES)", "def eval_line(code, module_dict):\n output, result, exc_info, is_syn = \\\n capturing_stdout(lambda: eval(code, module_dict))\n if exc_info is not None:\n # If a line failed to parse as an expression, it might be the\n # line was meant as a statement. (Properly we should first try\n # to *parse* instead of *eval*, above. XXX Distinguishing them\n # in this way instead is a lazy hack which will misbehave in\n # rare cases.)\n if is_syn:\n def thunk(): exec(code, module_dict)\n output, result, exc_info, is_syn = capturing_stdout(thunk)\n parts = []\n if output: parts.append(OutputPart(output))\n if result is not None: parts.append(OutputPart(repr(result)))\n if exc_info is not None: parts.append(format_exception(exc_info))\n return parts", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)", "def compile(expression):", "def _interpreter(self, inp):\n inp = inp.strip()\n elem = inp.split(\" \")\n return self.controller.execute(elem[0], elem[1:])", "def source_to_code(self, data, path):\n\t\treturn _call_with_frames_removed(compile, data, path, 'exec', dont_inherit=True)", "def Eval(expression):\n # pylint: disable=eval-used\n return eval(expression)", "def execute(self, code):\n code = code()\n\n # Build an AST tree from the Python code, to get the line number of each statement\n try:\n nodes = compiler.parse(code).getChildNodes()[0].getChildNodes()\n lines = [node.lineno - 1 for node in nodes]\n except:\n self.executions += '>>> ' + code + '\\n' + ''.join(traceback.format_exception(*sys.exc_info())[4:])\n return\n\n code = code.splitlines()\n\n with IDEFrameContext.exec_lock:\n stdout = sys.stdout\n\n try:\n # Iterate over all the statements\n for (a, b) in zip(lines, lines[1:] + [None]):\n sys.stdout = StringIO()\n\n source = code[a:b]\n\n try:\n # Execute the statement using this local and global context\n frame = self.get_frame()\n exec compile('\\n'.join(source), '<web>', 'single', 0, 1) in frame.f_locals, frame.f_globals\n except:\n print ''.join(traceback.format_exception(*sys.exc_info())[2:]).rstrip()\n\n self.executions += '\\n'.join([('... 
' if line.startswith(' ') else '>>> ') + line for line in source]) + '\\n' + sys.stdout.getvalue()\n finally:\n sys.stdout = stdout", "def sbox_exec(self, source: str):\n return exec(source, self.sbox_globals, self.sbox_locals)", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def get_expr(self, expr): # secured\n expr = utils.condition_source_code_keys(expr, self.get_required_parkeys())\n try:\n return expr, MAPPING_VERIFIER.compile_and_check(expr, source=self.basename, mode=\"eval\")\n except crexc.MappingFormatError as exc:\n raise crexc.MappingFormatError(\"Can't load file \" + repr(self.basename) + \" : \" + str(exc)) from exc", "def run(self, code='', file=''):\n if file and code:\n print('WARNING: reading file instead of the code')\n\n if file:\n source = Path(file)\n if source.exists():\n if not source.is_file():\n self.__abort(ERR_CODE_NOT_FILE)\n if file[len(file) - 3:] != EXTENSION:\n self.__abort(ERR_CODE_NOT_SOURCE)\n with source.open() as f:\n self.__code = f.read()\n else:\n self.__abort(ERR_CODE_FILE_MISSING)\n else:\n self.__code = code\n\n self.__tokenize()\n return self.__execute()", "def _score_code(self, code):\n # Get list of 2-tuples, each containing an input sequence and an output\n # sequence.\n io_seqs = self.task.make_io_set()\n terminal_reward = 0.0\n results = []\n reason = 'correct'\n for input_seq, output_seq in io_seqs:\n eval_result = bf.evaluate(\n code, input_buffer=input_seq, timeout=0.1,\n max_steps=self.max_execution_steps,\n base=self.task.base,\n require_correct_syntax=self.require_correct_syntax)\n result, success = eval_result.output, eval_result.success\n if not success:\n # Code execution timed out.\n terminal_reward = self.failure_reward\n results = []\n reason = eval_result.failure_reason\n break\n else:\n terminal_reward += self.reward_fn(result, output_seq, self.task.base)\n if result == output_seq:\n terminal_reward += self.correct_bonus # Bonus for correct answer.\n\n # Only add additional reward for shorter code. Subtracting reward\n # interferes with the main objective. Only optimize for length once\n # any solution is found.\n if self.min_code_length == self.max_code_length:\n terminal_reward += self.code_length_bonus\n else:\n terminal_reward += self.code_length_bonus * clipped_linear(\n x=len(code), x0=self.min_code_length, y0=1.0,\n slope=-self.time_penalty, y_range=(0.0, 1.0))\n\n # reason remains 'correct' if it is already\n elif reason == 'correct':\n reason = 'wrong'\n results.append(result)\n\n # Return list of rewards, one for each char in the code. All are 0 except\n # for the terminal reward.\n terminal_reward /= self.best_reward\n return misc.RewardInfo(\n episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],\n input_case=misc.IOTuple(i for i, o in io_seqs),\n correct_output=misc.IOTuple(o for i, o in io_seqs),\n code_output=misc.IOTuple(results),\n input_type=self.input_type,\n output_type=self.output_type,\n reason=reason)", "def evaluate():\n click.echo(\"Not implemented yet. 
In the future, this command will be used for evaluation.\")\n sys.exit(-2)", "def eval(self, A):\n\t\tpass", "def evaluate_raw(self):\n raise NotImplementedError", "def __call__(self, *args):\n\n func_env = Environment(self.parent)\n self.define_args(func_env, *args)\n return evaluate(self.body, func_env)", "def eval(self):\n raise NotImplemented()", "def process(txt, out):\n r = re.compile(\"<py (.*) >\")\n txt = txt.split('\\n')\n for line in txt:\n res = r.match(line)\n if res: out.write(eval(res.group(1)))\n else: out.write(line + '\\n')", "def run(self, this, actor, args):\n import os\n import sys\n import datetime\n import calendar\n import math\n from mudsling import registry\n from mudsling.config import config\n\n # args['code'] isn't reliable since the semicolon shortcut may skip\n # parsing the args via syntax.\n code = self.argstr\n\n #: @type: Object\n char = actor.possessing\n\n if not code:\n actor.msg(self.syntax_help())\n return False\n\n available_vars = {\n 'eval_cmd': self,\n 'time': time,\n 'datetime': datetime,\n 'calendar': calendar,\n 'game': self.game,\n 'ref': self.game.db.get_ref,\n 'registry': registry,\n 'config': config,\n 'player': actor,\n 'me': char,\n 'here': (char.location if self.game.db.is_valid(char, Object)\n else None),\n 'utils': mudsling.utils,\n 'math': math,\n }\n available_vars.update(sys.modules)\n\n # Support MOO-style objrefs in eval code.\n code = self.objref.sub(r'ref(\\1)', code)\n code = self.objref_escape_fix.sub(r'#\\1', code)\n\n inMsg = string.parse_ansi('{y>>> ') + code + string.parse_ansi(\"{n\")\n actor.msg(inMsg, {'raw': True})\n\n mode = 'eval'\n out = ''\n duration = compile_time = None\n #noinspection PyBroadException\n try:\n begin = time.clock()\n #noinspection PyBroadException\n try:\n compiled = compile(code, '', 'eval')\n except:\n mode = 'exec'\n compiled = compile(code, '', 'exec')\n compile_time = time.clock() - begin\n\n if self.cmdstr == '@profile':\n import cProfile as profile\n import pstats\n import cStringIO as io\n profiler = profile.Profile()\n begin = time.clock()\n profiler.enable()\n ret = profiler.runctx(compiled, {}, available_vars)\n profiler.disable()\n duration = time.clock() - begin\n s = io.StringIO()\n stats = pstats.Stats(profiler, stream=s)\n stats.strip_dirs()\n stats.sort_stats('time')\n stats.print_stats()\n out += s.getvalue() + '\\n'\n s.close()\n else:\n begin = time.clock()\n ret = eval(compiled, {}, available_vars)\n duration = time.clock() - begin\n\n if mode == 'eval':\n out += \"<<< %s\" % repr(ret)\n if isinstance(ret, ObjRef):\n if ret.is_valid():\n name = \"%s (%s)\" % (ret.class_name(),\n ret.python_class_name())\n else:\n name = 'INVALID'\n out += \" [%s]\" % name\n else:\n out = \"<<< Done.\"\n except SystemExit:\n raise\n except:\n error_lines = traceback.format_exc().split('\\n')\n if len(error_lines) > 4:\n error_lines = error_lines[4:]\n out = \"\\n\".join(\"<<< %s\" % line for line in error_lines if line)\n\n raw_string = string.parse_ansi(\"{m\") + out + string.parse_ansi(\"{n\")\n actor.msg(raw_string, {'raw': True})\n if duration is not None:\n msg = \"Exec time: %.3f ms, Compile time: %.3f ms (total: %.3f ms)\"\n actor.msg(msg % (duration * 1000,\n compile_time * 1000,\n (duration + compile_time) * 1000))", "def test_compiled_runner(mock_evaluator, mock_environment):\n functions = [sentinel.Function]\n r = celpy.CompiledRunner(mock_environment, sentinel.AST, functions)\n with raises(NotImplementedError):\n result = r.evaluate({\"variable\": sentinel.variable})", "def 
execute(self, source):\n tree = ast.parse(source=source)\n self._execute(body=tree.body[:-1], mode=\"exec\")\n self._execute(body=tree.body[-1:], mode=\"single\")", "def eval_node(node, env):\n global genv\n global result\n node_type = node_name(node)\n\n if node_type == 'Expr':\n return eval_node(node.value, env)\n elif node_type == 'Assign':\n val = eval_node(node.value, env)\n\n while type(val) is tuple and len(val) == 2 and (type(val[1]) == GlobalEnv or type(val[1]) == LocalEnv):\n val = val[0]\n\n # extract the variable name, evaluate the RHS, then extend the environment.\n return 0, env.extend([node.targets[0].id], [val])\n elif node_type == 'BinOp':\n # get the left and right operands (we use only single operands) and the operator.\n # evaluate the operands and apply the operator. return the number, env.\n\n left = eval_node(node.left, env)[0]\n right = eval_node(node.right, env)[0]\n\n left = left[0] if type(left) is tuple else left\n right = right[0] if type(right) is tuple else right\n\n op = node_name(node.op)\n\n if op == \"Add\":\n return (left + right), env\n elif op == \"Sub\":\n return (left - right), env\n elif op == \"Mult\":\n return (left * right), env\n elif op == \"Div\":\n return (left / right), env\n elif op == \"Mod\":\n return (left % right), env\n return 0, env\n elif node_type == 'FunctionDef':\n # need the function id (name), args, and body. Extend the environment.\n # you can leave the args wrapped in the ast class and the body and unpack them\n # when the function is called.\n\n return 0, env.extend([node.name], [(node.args, node.body)])\n elif node_type == 'Call':\n # get any values passed in to the function from the Call object.\n # get the fxn name and look up its parameters, if any, and body from the env.\n # get lists for parameter names and values and extend a LocalEnv with those bindings.\n # evaluate the body in the local env, return the value, env.\n\n func = eval_node(node.func, env)[0]\n local_env = LocalEnv(None, env)\n\n args = func[0].args\n body = func[1]\n\n index = 0\n for val in node.args:\n local_env = local_env.extend([args[index].arg], [eval_node(val, local_env)[0]])\n index += 1\n\n for node in body:\n val = eval_node(node, local_env)\n\n if node_name(node) == \"Return\":\n output_val = val[0]\n local_env = val[1]\n return output_val, env\n elif node_type == 'Return':\n # evaluate the node, return the value, env.\n return eval_node(node.value, env)\n elif node_type == 'Name':\n # Name(identifier id)- lookup the value binding in the env\n # return the value, env\n return env.lookup(node.id), env\n # Num(object n) -- a number, return the number, env.\n elif node_type == 'Num':\n return node.n, env", "def SimpleEval(source):\n itertokens = generate_tokens(StringIO(source).readline)\n next = (token[1] for token in itertokens if token[0] is not NL).next\n res = atom(next, next())\n if next():\n raise SyntaxError('bogus data after expression')\n return res", "def complie_and_execute(self, lang, code):\n t_id = threading.current_thread().get_ident()\n self[lang](t_id, code)", "async def remote_eval(self, code):\n async with self.next_step_write:\n self.next_step_content = code\n self.next_step_has_data.set()\n\n await self.last_result_has_data.wait()\n return self.last_result_content", "def py(var, wrapper, message):\n try:\n exec(message)\n except Exception as e:\n wrapper.send(\"{e.__class__.__name__}: {e}\".format(e=e))", "def _eval(self, node, ctx):\n if node is None:\n return None\n elif isinstance(node, ast.Name): # <identifier>\n # 
lookup identifiers in local namespace\n if node.id in ctx['locals']:\n _local = ctx['locals'][node.id]\n\n # if local variable contains a list, evaluate each element by threading 'get_expr' over it\n if isinstance(_local, list):\n _retlist = []\n for _local_el in _local:\n # non-string elements are simply passed through\n if not isinstance(_local_el, str):\n _retlist.append(_local_el)\n continue\n\n # string-valued elements are evaluated\n try:\n # NOTE: local variable lookup is disabled when threading\n # over lists that were stored in local variables themselves.\n # This is done to prevent infinite recursion errors for\n # expressions which may reference themselves\n _ret_el = self.get_expr(_local_el, locals=None)\n except NameError as e:\n # one element of the list references a local variable\n # -> stop evaluation and return dummy\n # use NameError object instead of None to identifiy\n # dummy elements unambiguously later\n _retlist.append(e)\n else:\n # evaluation succeeded\n _retlist.append(_ret_el)\n return _retlist\n # local variables containing strings are parsed\n elif isinstance(_local, str):\n return self.get_expr(_local, locals=None)\n # all other types are simply passed through\n else:\n return _local\n\n # if no local is found, try a few builtin Python literals\n elif node.id in ('True', 'False', 'None'): # restrict subset of supported literals\n return ast.literal_eval(node.id) # returns corresponding Python literal from string\n\n # if nothing above matched, assume mistyped identifier and give up\n # NOTE: do *not* assume identifier is a ROOT file path. ROOT file paths\n # must be given explicitly as strings.\n else:\n raise NameError(\"Cannot resolve identifier '{}': not a valid Python literal or a registered local variable!\".format(node.id))\n elif isinstance(node, ast.Str): # <string> : array column\n if ctx['input']:\n # lookup in ROOT file\n return self.get(node.s)\n else:\n # return string as-is\n return node.s\n elif isinstance(node, ast.Num): # <number>\n return node.n\n elif isinstance(node, ast.Call): # node names containing parentheses (interpreted as 'Call' objects)\n # -- determine function to call\n\n # function handle is a simple identifier\n if isinstance(node.func, ast.Name):\n\n # handle special functions\n if node.func.id in self.special_functions:\n _spec_func_spec = self.special_functions[node.func.id]\n # callable for special function (default to no-op)\n _callable = _spec_func_spec.get('func', lambda x: x)\n # modify avaluation context for special function\n ctx = dict(ctx, **_spec_func_spec.get('ctx', {}))\n\n # call a registered input function\n else:\n try:\n _callable = ctx['functions'][node.func.id]\n except KeyError as e:\n raise KeyError(\n \"Cannot call input function '{}': no such \"\n \"function!\".format(node.func.id))\n\n # function handle is an expression\n else:\n # evaluate 'func' as any other node\n _callable = self._eval(node.func, ctx)\n\n # evaluate unpacked positional arguments, if any\n _starargs_values = []\n if node.starargs is not None:\n _starargs_values = self._eval(node.starargs, ctx)\n\n # starred kwargs (**) not supported for the moment\n if node.kwargs:\n raise NotImplementedError(\n \"Unpacking keyword arguments in expressions via \"\n \"** is not supported. 
Expression was: '{}'\".format(\n ast.dump(node, annotate_fields=False)))\n\n # evaluate arguments\n _args = map(lambda _arg: self._eval(_arg, ctx), node.args) + _starargs_values\n _kwargs = {\n _keyword.arg : self._eval(_keyword.value, ctx)\n for _keyword in node.keywords\n }\n\n # call function\n return _callable(*_args, **_kwargs)\n elif isinstance(node, ast.BinOp): # <left> <operator> <right>\n return ctx['operators'][type(node.op)](self._eval(node.left, ctx), self._eval(node.right, ctx))\n elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1\n return ctx['operators'][type(node.op)](self._eval(node.operand, ctx))\n elif isinstance(node, ast.Subscript): # <operator> <operand> e.g., -1\n if isinstance(node.slice, ast.Index): # support subscripting via simple index\n return self._eval(node.value, ctx)[self._eval(node.slice.value, ctx)]\n elif isinstance(node.slice, ast.Slice): # support subscripting via slice\n return self._eval(node.value, ctx)[self._eval(node.slice.lower, ctx):self._eval(node.slice.upper, ctx):self._eval(node.slice.step, ctx)]\n else:\n raise TypeError(node)\n elif isinstance(node, ast.Attribute): # <value>.<attr>\n return getattr(self._eval(node.value, ctx), node.attr)\n elif isinstance(node, ast.List): # list of node names\n return [self._eval(_el, ctx) for _el in node.elts]\n elif isinstance(node, ast.Tuple): # tuple of node names\n return tuple(self._eval(_el, ctx) for _el in node.elts)\n else:\n raise TypeError(node)", "def _compile(self, source: str, filename: str) -> CodeType:\n return compile(source, filename, \"exec\") # type: ignore", "def safe_exec(code, globals_dict, random_seed=None, python_path=None, cache=None, slug=None, unsafely=False):\r\n # Check the cache for a previous result.\r\n if cache:\r\n safe_globals = json_safe(globals_dict)\r\n md5er = hashlib.md5()\r\n md5er.update(repr(code))\r\n update_hash(md5er, safe_globals)\r\n key = \"safe_exec.%r.%s\" % (random_seed, md5er.hexdigest())\r\n cached = cache.get(key)\r\n if cached is not None:\r\n # We have a cached result. The result is a pair: the exception\r\n # message, if any, else None; and the resulting globals dictionary.\r\n emsg, cleaned_results = cached\r\n globals_dict.update(cleaned_results)\r\n if emsg:\r\n raise SafeExecException(emsg)\r\n return\r\n\r\n # Create the complete code we'll run.\r\n code_prolog = CODE_PROLOG % random_seed\r\n\r\n # Decide which code executor to use.\r\n if unsafely:\r\n exec_fn = codejail_not_safe_exec\r\n else:\r\n exec_fn = codejail_safe_exec\r\n\r\n # Run the code! Results are side effects in globals_dict.\r\n try:\r\n exec_fn(\r\n code_prolog + LAZY_IMPORTS + code, globals_dict,\r\n python_path=python_path, slug=slug,\r\n )\r\n except SafeExecException as e:\r\n emsg = e.message\r\n else:\r\n emsg = None\r\n\r\n # Put the result back in the cache. 
This is complicated by the fact that\r\n # the globals dict might not be entirely serializable.\r\n if cache:\r\n cleaned_results = json_safe(globals_dict)\r\n cache.set(key, (emsg, cleaned_results))\r\n\r\n # If an exception happened, raise it now.\r\n if emsg:\r\n raise e", "def _extract_context(self, tree):\r\n context = {}\r\n context['seed'] = self.seed\r\n context['anonymous_student_id'] = self.capa_system.anonymous_student_id\r\n all_code = ''\r\n\r\n python_path = []\r\n\r\n for script in tree.findall('.//script'):\r\n\r\n stype = script.get('type')\r\n if stype:\r\n if 'javascript' in stype:\r\n continue # skip javascript\r\n if 'perl' in stype:\r\n continue # skip perl\r\n # TODO: evaluate only python\r\n\r\n for d in self._extract_system_path(script):\r\n if d not in python_path and os.path.exists(d):\r\n python_path.append(d)\r\n\r\n XMLESC = {\"&apos;\": \"'\", \"&quot;\": '\"'}\r\n code = unescape(script.text, XMLESC)\r\n all_code += code\r\n\r\n if all_code:\r\n try:\r\n safe_exec(\r\n all_code,\r\n context,\r\n random_seed=self.seed,\r\n python_path=python_path,\r\n cache=self.capa_system.cache,\r\n slug=self.problem_id,\r\n unsafely=self.capa_system.can_execute_unsafe_code(),\r\n )\r\n except Exception as err:\r\n log.exception(\"Error while execing script code: \" + all_code)\r\n msg = \"Error while executing script code: %s\" % str(err).replace('<', '&lt;')\r\n raise responsetypes.LoncapaProblemError(msg)\r\n\r\n # Store code source in context, along with the Python path needed to run it correctly.\r\n context['script_code'] = all_code\r\n context['python_path'] = python_path\r\n return context", "def evaluateBoolean(compiled_expression):", "def evaluate_program(ast,prog_name='prior_sampling',prog_args=[]):\n PROCS = {}\n for i in range(len(ast)-1):\n proc = ast[i]\n proc_name, proc_arg_names, proc_expr = proc[1], proc[2], proc[3]\n PROCS[proc_name] = (proc_arg_names,proc_expr)\n\n def eval(expr, sigma, scope):\n if is_const(expr, scope):\n if type(expr) in [int, float]:\n expr = torch.Tensor([expr]).squeeze()\n return expr, sigma\n elif is_var(expr, scope):\n return scope[expr], sigma\n elif is_let(expr, scope):\n var_name, sub_expr, final_expr = expr[1][0], expr[1][1], expr[2]\n var_value, sigma = eval(sub_expr, sigma, scope)\n return eval(final_expr, sigma, {**scope, var_name: var_value})\n elif is_if(expr,scope):\n cond_expr, true_expr, false_expr = expr[1], expr[2], expr[3]\n cond_value, sigma = eval(cond_expr, sigma, scope)\n if cond_value:\n return eval(true_expr, sigma, scope)\n else:\n return eval(false_expr, sigma, scope)\n elif is_sample(expr,scope):\n dist_expr = expr[1]\n dist_obj, sigma = eval(dist_expr,sigma,scope)\n return dist_obj.sample(), sigma\n elif is_observe(expr,scope):\n dist_expr, obs_expr = expr[1], expr[2]\n dist_obj, sigma = eval(dist_expr,sigma,scope)\n obs_value, sigma = eval(obs_expr,sigma,scope)\n sigma['logW'] = sigma['logW'] + dist_obj.log_prob(obs_value)\n return obs_value, sigma\n else:\n proc_name = expr[0]\n consts = []\n for i in range(1,len(expr)):\n const, sigma = eval(expr[i],sigma,scope)\n consts.append(const)\n if proc_name in PROCS:\n proc_arg_names, proc_expr = PROCS[proc_name]\n new_scope = {**scope}\n for i, name in enumerate(proc_arg_names):\n new_scope[name] = consts[i]\n return eval(proc_expr, sigma, new_scope)\n else:\n return PRIMITIVES[proc_name](*consts), sigma\n if prog_name == 'prior_sampling':\n return eval(ast[-1], {}, {})\n elif prog_name == 'importance_sampling':\n print('Importance Sampling')\n L = 
prog_args\n importance_out = []\n for l in range(L):\n r_l, sigma_l = eval(ast[-1],{'logW': 0},{})\n importance_out.append([r_l,sigma_l['logW']])\n\n return importance_out", "def __call__(self,thing):\n return self.compiled(thing)", "def eval_or_exec(expr):\n\n #print(s)\n try:\n try:\n retval = eval(expr)\n except SyntaxError:\n # SyntaxError will be thrown by eval() if s is compound,\n # ie not a simple expression, eg if it contains function\n # definitions, multiple lines, etc. Then we must use\n # exec(). Then we assume that s will define a variable\n # called \"XXXeval_or_exec_outputXXX\", and we'll use that.\n dictionary = {}\n exec(expr, dictionary)\n retval = dictionary[\"XXXeval_or_exec_outputXXX\"]\n except MemoryError:\n # Will be thrown by eval(s) or exec(s) if s contains over-deep\n # nesting (see http://bugs.python.org/issue3971). The amount\n # of nesting allowed varies between versions, is quite low in\n # Python2.5. If we can't evaluate, award bad fitness.\n retval = default_fitness(FITNESS_FUNCTION.maximise)\n return retval", "def eval(self):\n return self.with_transforms(\"eval\")", "def readin(pythonfilename):\n with open(pythonfilename) as f:\n code = f.read()\n FuncLister().visit(ast.parse(code))", "def is_valid_python(code: str) -> bool:\n try:\n ast.parse(code)\n except SyntaxError:\n return False\n return True", "def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")" ]
[ "0.81394625", "0.7703716", "0.75885594", "0.73609924", "0.69410247", "0.6835199", "0.68233687", "0.6816809", "0.6794713", "0.67543906", "0.6748919", "0.66606015", "0.6600024", "0.65660536", "0.6557946", "0.6557946", "0.6557946", "0.6556634", "0.6536984", "0.6512488", "0.64603215", "0.6453816", "0.6425804", "0.6397024", "0.6368407", "0.6304785", "0.6278884", "0.6271991", "0.6265682", "0.62534535", "0.6251894", "0.6244782", "0.62264913", "0.61924267", "0.6167245", "0.6147564", "0.6147564", "0.61285776", "0.60963744", "0.6084816", "0.6076091", "0.6060803", "0.60467046", "0.6023442", "0.6022594", "0.60185105", "0.6005406", "0.6004145", "0.5996528", "0.5987894", "0.5954069", "0.5933614", "0.59300554", "0.5926369", "0.5923153", "0.59115565", "0.5896488", "0.58918494", "0.58482605", "0.58211166", "0.5768743", "0.5753791", "0.5719283", "0.5716233", "0.57112634", "0.5709315", "0.57064456", "0.57061195", "0.56904984", "0.5690493", "0.5682048", "0.56439185", "0.5632144", "0.5630042", "0.5626704", "0.56189644", "0.5618481", "0.5615557", "0.55988157", "0.55979246", "0.5594236", "0.55919373", "0.5589218", "0.5584127", "0.5574436", "0.5568981", "0.5562603", "0.55603653", "0.55535924", "0.5552216", "0.5547049", "0.5543256", "0.55360836", "0.55324626", "0.55310893", "0.55254126", "0.5515937", "0.55158156", "0.5514268", "0.5499943" ]
0.63693184
24
Placeholder function This does not rotate the side chains. Only replaces the residues.
def get_neighbour_state(protein_pdb, name, dunbrack_library, buried_residues): new_file=open(name, "a") #chosen_residues = [i for i in buried_residues if get_current_residue(protein_pdb, i) in allowed_residues] residue_to_mutate=rd.choice(buried_residues) current_residue = get_current_residue(protein_pdb, residue_to_mutate) new_residue = rd.choice([i for i in allowed_residues if i!=current_residue]) protein_lines = list(parser.parse_file(protein_pdb)) residue_lines = list(parser.parse_file(residue_path.format(new_residue))) backbone_dictionary = mut.get_backbone_dictionary(mut.get_backbone(protein_lines)) phi = round_to_tens(ra.get_phi(residue_to_mutate, backbone_dictionary)) psi = round_to_tens(ra.get_psi(residue_to_mutate, backbone_dictionary)) main_key = new_residue, phi, psi choices = dunbrack_library[main_key] weighted_choices = [(i, choices[i]) for i in choices] choice = weighted_choice(weighted_choices) new_lines = mut.rotate_to_chis(choice, residue_lines) new_protein = mut.mutate(protein_lines, residue_to_mutate, new_lines) for l in new_protein: line = parser.pdb_format(l) print(line, file=new_file) new_file.close() return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fill_input(self):\n for sc in self.initial:\n if sc not in self.litter:\n self.litter[sc] = [0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0.]\n for sc in self.litter:\n if sc not in self.initial:\n self.initial[sc] = [0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0.]", "def l_un_degenerate(self):\n self.right = self.tmp", "def _perturbInPlaceHard(self):\n die", "def _rewrite_unary_default(self, node: saldag.UnaryOpNode):\n\n par = next(iter(node.parents))\n if node.is_reversible() and node.is_lower_boundary() and not par.is_root():\n print(\"lower boundary\", node)\n node.get_in_rel().stored_with = copy.copy(node.out_rel.stored_with)\n node.is_mpc = False", "def apply_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n resids['z'] = x*z + z - 4.0\n\n # Output equations need to evaluate a residual just like an explicit comp.\n resids['y'] = x + 2.0*z - unknowns['y']\n #print(x, unknowns['y'], z, resids['z'], resids['y'])", "def relax(self):\n rospy.loginfo('Now Arm will be relax')\n self.go_with2([0, 0, 0, 0, 0])", "def replace_lipid(membrane,cholesterol,nreplace) :\n\n if nreplace % 2 != 0 :\n raise ValueError(\"Number of replacements must be divisible by 2\")\n \n lipids_wat = [\"W\"]\n lipids_wat.extend(lipids)\n \n # Store away residues\n lipid_res = [r for r in membrane.residues if r.resname.upper().strip() in lipids ]\n wat_res = [r for r in membrane.residues if r.resname.upper().strip() == \"W\" ]\n other_res = [r for r in membrane.residues if r.resname.upper().strip() not in lipids_wat ]\n \n # Determine the centre of the bilayer\n zsum = 0\n for res in lipid_res :\n for atom in res.atoms :\n if atom.name.strip().upper() == head_atom[res.resname.upper().strip()] :\n zsum = zsum + atom.z\n break\n zmid = zsum / float(len(lipid_res))\n \n # Determine which lipids are in the lower leaflet\n lower = [False]*len(lipid_res)\n for i,res in enumerate(lipid_res) :\n for atom in res.atoms :\n if atom.name.strip().upper() == head_atom[res.resname.upper().strip()] :\n lower[i] = atom.z < zmid \n break\n nlower = sum(lower)\n #print \"Found a distribution of %d lipids in the lower leaflet and %d lipids in the upper leaflet\"%(nlower,len(lipid_res)-nlower)\n \n # Find the indices of the atoms mapping atoms\n indices = {}\n for res in lipid_res :\n resnam = res.resname.upper().strip()\n if resnam in indices : continue\n indices[resnam] = [-1]*len(mapping[resnam])\n for mi,m in enumerate(mapping[resnam]) :\n for i,atom in enumerate(res.atoms) :\n atomnam = atom.name.strip().upper()\n if atomnam == m[1] : \n indices[resnam][mi] = i\n break\n indices[resnam+\"_CHOL\"] = [-1]*len(mapping[resnam])\n for mi,m in enumerate(mapping[resnam]) :\n for i,atom in enumerate(cholesterol.residues[0].atoms) :\n atomnam = atom.name.strip().upper()\n if atomnam == m[0] : \n indices[resnam+\"_CHOL\"][mi] = i\n break\n \n # Do the random replacement\n chol_res = []\n taken = [False]*len(lipid_res)\n nreplace2 = nreplace / 2\n while len(chol_res) < nreplace2 : # First in the upper leaflet\n probe = np.random.randint(0,len(lipid_res))\n while taken[probe] or lower[probe] : \n probe = np.random.randint(0,len(lipid_res))\n taken[probe] = True\n chol_res.append(_fit_chol(lipid_res[probe],cholesterol,indices))\n while len(chol_res) < nreplace : # Then in the lower leaflet\n probe = np.random.randint(0,len(lipid_res))\n while taken[probe] or not lower[probe] : \n probe = np.random.randint(0,len(lipid_res))\n taken[probe] = True\n 
chol_res.append(_fit_chol(lipid_res[probe],cholesterol,indices))\n\n # Construct a new PDBFile object and renumber\n new_membrane = pdb.PDBFile()\n new_membrane.extend_residues(other_res,copy=True)\n new_membrane.extend_residues([r for i,r in enumerate(lipid_res) if not taken[i]],copy=True)\n new_membrane.extend_residues(chol_res,copy=False)\n new_membrane.extend_residues(wat_res,copy=True)\n new_membrane.renumber(doatoms=True,doresidues=True)\n new_membrane.box = np.array(membrane.box,copy=True)\n return new_membrane", "def r_un_degenerate(self):\n self.left = self.tmp", "def replace(self, *args, **kwargs): # real signature unknown\r\n pass", "def Replace(self, *args):\n return _BRepAlgo.BRepAlgo_AsDes_Replace(self, *args)", "def r_degenerate(self):\n self.tmp = self.left\n self.left = self.right", "def l_degenerate(self):\n self.tmp = self.right\n self.right = self.left", "def test_mut_replace_terminal_none_available(self):\n ind = self.individuals[self.ind_strings[0]]\n ind_clone = self.gama._toolbox.clone(ind)\n\n with self.assertRaises(ValueError) as error:\n mut_replace_terminal(ind_clone, self.gama._pset)\n\n self.assertTrue(\"Individual could not be mutated\" in str(error.exception))", "def relax(self):\n # print(\"putin\", self.level.rhs.reshape(-1)[:])\n # print(\"getout\", self.solver(self.level.rhs.reshape(-1)))\n\n self.level.mid[:] = self.solver(self.level.rhs.reshape(-1)).reshape(self.level.mid.shape)", "def fix_curvature(self) -> None:\n self.n1.fix = True\n self.n2.fix = True", "def test_replace_identity(self):\n pass", "def reset_world(self):\n print(\"Resetting world\")\n\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print(\"Done\")", "def BackSubstitution(self, Force):\n\t\tpass", "def replaceOp(image, op, box):\r\n\r\n small = op(image.crop(box))\r\n replace(image, small, box)", "def _clone_layout_placeholders(self, slidelayout):\n latent_ph_types = (PH_TYPE_DT, PH_TYPE_SLDNUM, PH_TYPE_FTR)\n for sp in slidelayout.shapes:\n if not sp.is_placeholder:\n continue\n ph = Placeholder(sp)\n if ph.type in latent_ph_types:\n continue\n self.__clone_layout_placeholder(ph)", "def repl(self):\n raise NotImplementedError", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def test_special_substitution_of_identity(free_alg):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n t = IndexedBase('y')\n a = IndexedBase('a')\n i, j = p.i, p.j\n v = p.v\n w = Vec('w')\n\n orig = dr.sum((i, p.R), x[i] * v[i] + a[i])\n ident_def = dr.define(1, dr.einst(t[i] * w[i]))\n\n res = orig.subst_all([ident_def])\n assert dr.simplify(\n res - dr.einst(x[i] * v[i])\n - dr.sum((i, p.R), (j, p.R), a[i] * t[j] * w[j])\n ) == 0", "def block_unbinding2(x,y):\n res = inv_binding_circular(x,y)\n return res", "def get_final_reconstruction(self):", "def _clone_rip_and_replace(self, memo):\n pass # this function will need to call rip and replace in library on each of the definitions when called from the netlist.\n for definition in self._definitions:\n definition._clone_rip_and_replace(memo)", "def in_place_substitute(self):\r\n if self.substitute is not None:\r\n node = self.convert_type()\r\n self.leaf_replace(node) # for internals 
only\r\n self.root_replace(node)", "def start_placing(self):\n raise NotImplementedError()", "def replace_in_side(self, would_repl, replacement):\n assert isinstance(would_repl, Side) and isinstance(replacement, Side)\n assert self.contains(would_repl)\n # remove all elements from would_repl in self\n for var in would_repl.__vars:\n self.pop_variable(var)\n\n # add all elements from replacement to self\n self.add_side(replacement)", "def CopyReplaceVertices(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_CopyReplaceVertices(self, *args)", "def restore_parameters(self):\n for p in self.parameters:\n setattr(self, p, self.parameters[p].init_value)\n self.set_symmetry()", "def fL():\n for n in b.allNodes():\n n.autoplace()", "def _replace_placeholder(component: tfx_base_node.BaseNode) -> None:\n keys = list(component.exec_properties.keys())\n for key in keys:\n exec_property = component.exec_properties[key]\n if not isinstance(exec_property, data_types.RuntimeParameter):\n continue\n component.exec_properties[key] = str(\n dsl.PipelineParam(name=exec_property.name))", "def fill_single_world():\n if not front_is_clear():\n if not right_is_clear():\n if not left_is_clear():\n put_beeper()", "def sgn_inplace(a):", "def copy_param(code, target, source):\n if source[SLOT] != 0:\n code.add(spu.rotqbyi(target, source[REG], source[SLOT] * 4))\n else:\n code.add(spu.ai(target, source[REG], 0))\n return", "def resetTransformations():\n dislin.trfres()", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = hash(params['x'])\n y = params['y']\n\n unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0", "def _rewrite_concat(self, node: saldag.Concat):\n\n if node.is_lower_boundary():\n\n out_stored_with = node.out_rel.stored_with\n for par in node.parents:\n if not par.is_root():\n par.out_rel.stored_with = copy.copy(out_stored_with)\n node.is_mpc = False", "def _identity_placeholder(\n self,\n x: typing.Any,\n ) -> typing.Any:\n return x", "def apply_nonlinear(self, params, unknowns, resids):\n\n y1 = params['y1']\n y2 = params['y2']\n\n resids['x'] = y1 - y2", "def reload_placeholder(update):\n pass", "def QR_apply(X,y,active) :\n assert y.ndim == 2, \"In order to apply householder\"\n \n A = X.copy()\n y = y.copy()\n ix = np.where(active)[0]\n for i,j in enumerate(ix) :\n #print \"loop j:\",j, \"i: \",i\n beta, h = householder(A[i:,j])\n A[i:,j:] = apply_householder(A[i:,j:],beta,h)\n y[i:] = apply_householder(y[i:],beta,h)\n #print \"A: \"\n #print A\n stdout.flush()\n return A", "def make_empty_side(self, side):\n if side == u'right':\n for k,v in self.d.items():\n item = v[0]\n mnemo = ''\n self.d[k] = [item, mnemo]\n\n if side == u'left':\n for k,v in self.d.items():\n item = ''\n mnemo = v[1]\n self.d[k] = [item, mnemo]\n\n self.clear_controls()\n self.set_value(self.n_parent, self.n)", "def reset_boundaries(self):\n self.L = - np.random.uniform(0.0,1.0)\n self.R = self.L + 1.0\n self.Ne = 0.0\n self.Nc = 0.0", "def default_replacement(random, population, parents, offspring, args):\r\n return population", "def special_corner() -> None:\r\n 
if example[1, 1] == 0: # NW\r\n if conflict_space[0, 0] == conflict_space[0, 2] and conflict_space[2, 0] == conflict_space[0, 0] \\\r\n and conflict_space[0, 0] != 0:\r\n example[0, 0] = 0\r\n safeboard[0, 0] = 0\r\n progress_handler(False, True)\r\n \r\n if example[1, shape-2] == 0: # NE\r\n if conflict_space[0, shape-1] == conflict_space[0, shape-3] and \\\r\n conflict_space[2, shape-1] == conflict_space[0, shape-1] and conflict_space[0, shape-1] != 0:\r\n example[0, shape-1] = 0\r\n safeboard[0, shape-1] = 0\r\n progress_handler(False, True)\r\n \r\n if example[shape-2, shape-2] == 0: # SE\r\n if conflict_space[shape-1, shape-1] == conflict_space[shape-1, shape-3] and \\\r\n conflict_space[shape-1, shape-1] == conflict_space[shape-3, shape-1] \\\r\n and conflict_space[shape-1, shape-1] != 0:\r\n example[shape-1, shape-1] = 0\r\n safeboard[shape-1, shape-1] = 0\r\n progress_handler(False, True)\r\n \r\n if example[shape-2, 1] == 0: # SW\r\n if conflict_space[shape-1, 0] == conflict_space[shape-1, 2] and \\\r\n conflict_space[shape-1, 0] == conflict_space[shape-3, 0] and conflict_space[shape-1, 0] != 0:\r\n example[shape-1, 0] = 0\r\n safeboard[shape-1, 0] = 0\r\n progress_handler(False, True)\r\n if progress_handler(True, False):\r\n conflict_check()", "def reset_R(self):\n self.R = [np.ones((1, 1)) for _ in range(self.L + 2)]\n self.R[-1] = None", "def contract(self):\n self.vertices[-1, :] = self.contracted", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents", "def le_inplace(a,b):", "def replace_construct(self, c):\n if self.array_index is not None:\n self.parent_item.construct.args[self.arg_index][self.array_index] = c\n elif self.arg_index is not None:\n self.parent_item.construct.args[self.arg_index] = c\n else:\n raise ValueError(\"Invalid parent\")", "def replace_block(proof, headers_map, interlink_map, block_index):\n\n prevous_block = proof[block_index - 1][0]\n block_hash = prevous_block[36:68]\n block = headers_map[block_hash]\n interlink = list_flatten(interlink_map[block.GetHash()])\n\n block_2 = mine_block(\n block.hashPrevBlock, block.nBits - 1, interlink, hashMerkleRoot=b\"\\x00\" * 32\n )\n return (\n proof[0:block_index]\n + [[block_2.serialize(), proof[block_index][1]]]\n + proof[block_index + 1 :]\n )", "def reset_correlation(r2, polys, poly):\n \n polys[r2] = poly", "def replace_placeholders(self, placeholder_dict):\n\n for placeholder, value in placeholder_dict.items():\n placeholder_wrapped = f\"{self.marker_string}{placeholder}{self.marker_string}\"\n\n if placeholder not in self.unresolved_placeholders:\n self.hd.log.warn(f\"Placeholder {placeholder} not found in sequence.\")\n else:\n self.sequence = self.sequence.replace(f\"{placeholder_wrapped}\", str(value))\n self.unresolved_placeholders.discard(placeholder)", "def rollout(self, node, scratch_game):\n pass", "def _drop_ground_node(self):\n self.laplace = np.concatenate((\n np.concatenate((\n 
self.laplace[:self.ground_node,:self.ground_node],\n self.laplace[:self.ground_node,self.ground_node+1:]),1),\n\n np.concatenate((\n self.laplace[self.ground_node+1:,:self.ground_node],\n self.laplace[self.ground_node+1:,self.ground_node+1:]), 1)))\n\n self.degree = np.concatenate((\n self.degree[:self.ground_node], \n self.degree[self.ground_node+1:]))", "def _autoplace(self, nodes):\n for node in nodes:\n node.autoplace()", "def replace_initializations(done_replacing, array, mask, replacement_value, initialization_value):\r\n \r\n # sanity check\r\n if np.any(mask) and done_replacing:\r\n raise ValueError('Being given locations to replace and yet told that we are done replacing.')\r\n \r\n # check that the mask and array have the same shape\r\n if array.shape != mask.shape:\r\n raise ValueError('Attempting to replace using a mask shape: {} not equal to the array shape: {}'.format(mask.shape, array.shape))\r\n \r\n # check that the mask only points to locations with initialized values\r\n if np.any(array[mask] != initialization_value):\r\n raise ValueError('Attempting to overwrite a non-initialization value.')\r\n \r\n array[mask] = replacement_value\r\n \r\n done_replacing = np.all(array!=initialization_value)\r\n \r\n return array, done_replacing", "def replace(self):\n if self.removed:\n self.coordinates = [[(self.player * 15 - 15), 0], [(self.player * 15 - 15), 1],\n [(self.player * 15 - 15), 2], [(self.player * 15 - 15), 3]]\n for i in self.coordinates:\n self.collision_boxes.append(rect.Rect(i[0] * 64, i[1] * 64, 64, 64))\n self.removed=False", "def PrepareReceptor(pdb,padding=4,outpath=\"\"):\n print(\"STOP CALLING THIS FUNCTION\")\n exit()\n com = oechem.OEGraphMol()\n ifs = oechem.oemolistream()\n if ifs.open(pdb):\n oechem.OEReadPDBFile(ifs, com)\n ifs.close()\n\n \"\"\"\n Sorry, this requires some explanation. Openeye wasn't recognizing the previously docked ligand, so I tried to find other ways.\n The next blocks of code take our system and split it based on its connected components, for which its REQUIRED that our protein\n only has a single chain. It assumes that the last component is the ligand. It then creates the ligand (lig) and protein (prot)\n as separate molecules. Next, it finds the minimum and maximum 3D coordinates of the current ligand and produces a box around\n it with the specified padding. Finally it uses this box to create a 'receptor' object into which ligands can be docked.\n Only the receptor is returned.\n Openeye's docking shouldn't be this involved, but I couldn't get it to run the typical 'hybrid' docking without error.\n \"\"\"\n oechem.OEDetermineConnectivity(com)\n nparts, connect = oechem.OEDetermineComponents(com)\n if(nparts != 2):\n print(\"ERR in dock_conf::prepareReceptor. 
PDB doesn't have 2 connected components\")\n exit()\n ## TODO: What is a good way to catch errors?\n # Get apo\n pred = oechem.OEPartPredAtom(connect)\n pred.SelectPart(nparts)\n lig = oechem.OEGraphMol()\n oechem.OESubsetMol(lig, com, pred)\n print(lig)\n \n # Get protein\n pred = oechem.OEPartPredAtom(connect)\n pred.SelectPart(1)\n prot = oechem.OEGraphMol()\n oechem.OESubsetMol(prot, com, pred)\n \n # Get box dimensions by iterating over ligand\n x_min = y_min = z_min = float('inf')\n x_max = y_max = z_max = -float('inf')\n crd = lig.GetCoords()\n print(\"CRD\", crd)\n for atm in crd:\n x,y,z = crd[atm]\n if x < x_min:\n x_min = x\n if y < y_min:\n y_min = y\n if z < z_min:\n z_min = z\n if x > x_max:\n x_max = x\n if y > y_max:\n y_max = y\n if z > z_max:\n z_max = z\n x_min -= padding\n y_min -= padding\n z_min -= padding\n x_max += padding\n y_max += padding\n z_max += padding\n print(x_min,y_min,z_max, y_max)\n # Now prepare the receptor\n receptor = oechem.OEGraphMol()\n box = oedocking.OEBox()\n box.Setup(x_max, y_max, z_max, x_min, y_min, z_min)\n oedocking.OEMakeReceptor(receptor, prot, box)\n \n if not outpath == \"\":\n oedocking.OEWriteReceptorFile(receptor,f'{outpath}/receptor.oeb')\n return receptor", "def revise():", "def identity(self):\r\n self.piDD = {\"[1]\": None}\r\n self.top_node = \"[1]\"\r\n self.dim = 0", "def residLike(self):\n\n # --------------------------------------------------------------------------------------------- #\n # Compute the residuals\n if self.csys == 'GAL':\n # Redo some file computations with this coordinate system\n self.outbinexp = os.path.join(self.workpath, 'BinExpMapGAL'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCubeGAL'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMapsGAL'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'ResidGAL'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigmaGAL'+self.suffix+'.fits')\n\n self._gtExpmap()\n self._gtBincube()\n self._gtSrcmap()\n else:\n # Nothing to add\n pass\n \n self._gtBinmap()\n self._gtModel()\n # Create the residual count map (count_map - model_map)\n if not os.path.isfile(self.outresid):\n os.popen(\"farith {} {} {} ops=SUB\".format(self.outbinmap, self.outgtmod,\n self.outresid))\n # Create the sigma-residual map (residual_map/sqrt(model_map))\n if not os.path.isfile(self.outresig):\n os.popen(\"ftpixcalc {} '(a-b)/sqrt(b)' a={} b={}\".format(self.outresig,\n self.outbinmap, self.outgtmod))\n\n # --------------------------------------------------------------------------------------------- #\n # Get the sources to overplot\n srcs = self.getSrc()\n srcs = srcs[(srcs['Separation'] <= 3.) 
& ([not i.endswith('c') for i in srcs['Name']])]\n # Plot the residuals\n resplt1 = FermiMap()\n resplt1.savepath = self.workpath\n resplt1.image = self.outresig\n resplt1.figname = 'ResSigma.pdf'\n dmin, dmax = np.abs(resplt1.datamin), resplt1.datamax\n resplt1.datamin = - min(dmin, dmax)\n resplt1.datamax = + min(dmin, dmax)\n resplt1.cbarlabel = r'Residual $\\sigma$/pixel'\n resplt1.mapSky()\n resplt1.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt1.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt1.savepath, resplt1.figname) ))\n\n resplt2 = FermiMap()\n resplt2.savepath = self.workpath\n resplt2.image = self.outresid\n resplt2.figname = 'Residuals.pdf'\n dmin, dmax = np.abs(resplt2.datamin), resplt2.datamax\n resplt2.datamin = - min(dmin, dmax)\n resplt2.datamax = + min(dmin, dmax)\n resplt2.cbarlabel = r'Residual counts/pixel'\n resplt2.mapSky()\n resplt2.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt2.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt2.savepath, resplt2.figname) ))\n return", "def reset_parameters(self):\n self.apply(ixvr)", "def reset_parameters(self):\n self.apply(ixvr)", "def solvate(self):\n\n pass", "def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())", "def make_looped(self) -> None:\n self.most_right.right_node = self.most_left\n self.most_left.left_node = self.most_right", "def realizedPL(self, realizedPL):\n\n self._realizedPL = realizedPL", "def block_binding2(x,y):\n res = binding_circular(x,y)\n return res", "def default_replacement(random, population, parents, offspring, args):\n return population", "def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,\n residual, pred, sample_weight):", "def inv_inplace(a):", "def replace(smap):\n def _replace_xducer(step):\n def _replace_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if x in smap:\n return step(r, smap[x])\n else:\n return step(r, x)\n return _replace_step\n return _replace_xducer", "def prepare_stars_that_need_bg_ols():\n\n wanted = Table.read(\n '/priv/mulga1/marusa/chronostar_projects/solar_neighbourhood/data/ScoCen_box_result_15M_ready_for_bg_ols.fits')\n\n old = Table.read('../scocen/data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n old_scocen = Table.read(\n '/priv/mulga1/marusa/chronostar_projects/scocen/data/scocen_candidates_300k_only_spatial_cut.fits')\n\n old_solar_neighbourhood_bg_ols = np.loadtxt('bgols_multiprocessing_0.dat')\n wanted0 = wanted[:len(old_solar_neighbourhood_bg_ols)]\n\n # DELETE THIS!!!\n wanted = wanted[len(old_solar_neighbourhood_bg_ols):]\n\n mask = np.in1d(wanted['source_id'], old['source_id'])\n mask = np.logical_or(mask, np.in1d(wanted['source_id'], old_scocen['source_id']))\n # ~ mask = np.logical_or(mask, np.in1d(wanted['source_id'], old_solar_neighbourhood['source_id']))\n\n # Looking for stars that do NOT have bg ols yet\n mask = ~mask\n\n todo = wanted[mask]\n print\n len(todo)\n print\n len(old), len(wanted), len(wanted) - len(old)\n\n todo.write('solar_neighbourhood_determine_bg_ols_for_these_stars.fits', format='fits')", "def replace_none(ret_st, pattern):\n curr_none = [i for i in range(len(fk_array)) if ret_st[i] == 'unmapped-none']\n while curr_none:\n temp_curr_none = curr_none[:MGET_CHUNK]\n 
curr_none = curr_none[MGET_CHUNK:]\n vals_array = rdb.mget([pattern.format(str(fk_array[i]).upper(), taxid, hint)\n for i in temp_curr_none])\n for i, val in zip(temp_curr_none, vals_array):\n if val is None:\n continue\n ret_st[i] = val.decode()", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def new_iteration(self):\n if (\n self.inner_solutions is not None\n and self.inner_solutions.size(0) > self.raw_samples\n ):\n indices = torch.randperm(n=self.inner_solutions.size(0))[: self.raw_samples]\n self.inner_solutions = self.inner_solutions[indices]\n self.inner_values = self.inner_values[indices]", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n y = params['y']\n\n unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0", "def xor_inplace(a,b):", "def test_remap_partial(self):\n # the order here is CCO\n ethanol = create_ethanol()\n # Create partial map to swap first two atoms\n partial_map = {0: 1, 1: 0}\n # Create equivalent total map\n total_map = {0: 1, 1: 0, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8}\n\n remapped_ethanol_partial = ethanol.remap(\n partial_map,\n current_to_new=True,\n partial=True,\n )\n remapped_ethanol_total = ethanol.remap(\n total_map,\n current_to_new=True,\n partial=False,\n )\n\n # check all of the properties match as well, torsions and impropers will be in a different order\n # due to the bonds being out of order\n self.assert_molecules_match_after_remap(\n remapped_ethanol_partial,\n remapped_ethanol_total,\n )", "def main(template_initial_path, template_grown_path, step, total_steps, hydrogen_to_replace, core_atom_linker,\n tmpl_out_path, null_charges=False, growing_mode=\"SoftcoreLike\"):\n lambda_to_reduce = float(step/(total_steps+1))\n templ_ini = TemplateImpact(template_initial_path)\n \n for bond in templ_ini.list_of_bonds:\n key, bond_cont = bond\n templ_grw = TemplateImpact(template_grown_path)\n fragment_atoms, core_atoms_in, core_atoms_grown = detect_atoms(template_initial=templ_ini, \n template_grown=templ_grw,\n hydrogen_to_replace=hydrogen_to_replace)\n set_fragment_atoms(list_of_fragment_atoms=fragment_atoms)\n set_connecting_atom(template_grown=templ_grw, pdb_atom_name=core_atom_linker)\n fragment_bonds = detect_fragment_bonds(list_of_fragment_atoms=fragment_atoms, template_grown=templ_grw)\n set_fragment_bonds(list_of_fragment_bonds=fragment_bonds)\n set_linker_bond(templ_grw)\n if growing_mode == \"SoftcoreLike\":\n modify_core_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, exp_charges=True,\n null_charges=null_charges)\n reduce_fragment_parameters_linearly(templ_grw, lambda_to_reduce, exp_charges=True, \n null_charges=null_charges)\n \n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n elif growing_mode == \"AllLinear\":\n modify_core_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, exp_charges=False)\n reduce_fragment_parameters_linearly(templ_grw, lambda_to_reduce, exp_charges=False,\n null_charges=False)\n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n elif growing_mode == \"SpreadHcharge\":\n if step > 1:\n reduce_fragment_parameters_originaly(templ_grw, templ_ini, lambda_to_reduce, \n hydrogen=hydrogen_to_replace, n_GS=total_steps)\n modify_linkers_parameters_linearly(templ_grw, lambda_to_reduce, templ_ini, hydrogen_to_replace)\n else:\n reduce_fragment_parameters_spreading_H(templ_grw, templ_ini, lambda_to_reduce, \n 
hydrogen=hydrogen_to_replace, n_GS=total_steps)\n else:\n raise ValueError(\"Growing mode Not valid. Choose between: 'SoftcoreLike', 'SpreadHcharge', 'AllLinear'.\")\n templ_grw.write_template_to_file(template_new_name=tmpl_out_path)\n return [atom.pdb_atom_name for atom in fragment_atoms], \\\n [atom.pdb_atom_name for atom in core_atoms_grown]", "def compute_partials(self, inputs, partials):\n partials['y', 'x'] = 2.0", "def test_rewrite_chains_stub(self):\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n )\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"]})", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)", "def redefineProblem(self):\n self.formulation = cp.Problem(self.obj, self.constraints)", "def free_curvature(self) -> None:\n self.n1.free = True\n self.n2.free = True", "def repair(self):\n self.dot = self.dot.replace('()', '..').replace('(.)', '...').replace('(..)', '....').replace('(...)', '.....')\n self.matrix = pair_matrix(self)\n length = len(self.seq)\n for x in range(length):\n for y in range(x, length):\n if self.matrix[x, y] == 1:\n if not is_pair_allowed(self.seq[x], self.seq[y]):\n self.dot = self.dot[:x] + '.' + self.dot[x + 1:y] + '.' + self.dot[y + 1:]\n return self", "def replace(self, csys, whichin, whichout):\n return _coordsys.coordsys_replace(self, csys, whichin, whichout)", "def prepare_squeezed_state(self, r, phi, mode):\n self.circuit.prepare_mode_squeezed(r, phi, self._remap_modes(mode))", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n a = self.a\n b = self.b\n c = self.c\n\n unknowns['y'] = a*x**2 + b*x + c", "def fixC(self,i,value):\n if self.coeffPattern[2] == None:\n m,n=self.m,self.n\n self.coeffPattern[2] = [None]*m\n self.coeffPattern[2][i]=value\n self._updateEstimatorSize(i)", "def residual_block(x,index,step=\"encoding\"):\r\n if step != \"encoding\":\r\n index = \"decode_{}\".format(index)\r\n \r\n x_new = kl.Conv2D(128,(3,3),padding=\"same\",activation=\"selu\",kernel_initializer=initializer,\r\n name=\"conv1_res_{}\".format(index))(x)\r\n \r\n x_new = kl.Conv2D(128,(3,3),padding=\"same\",kernel_initializer=initializer,\r\n name=\"conv2_res_{}\".format(index))(x_new)\r\n \r\n x_out = kl.Add()([x,x_new])\r\n x_out = kl.Activation(\"relu\")(x_out)\r\n return(x_out)", "def fixA(self,i,j,value):\n if self.coeffPattern[0] == None:\n m,n=self.m,self.n\n self.coeffPattern[0] = [[None]*m for i in range(m)]\n self.coeffPattern[0][i][j]=value\n self._updateEstimatorSize(i)", "def __init__(self, left, right):\n super(compositeORGenerator,self).__init__()\n self._left = left\n self._right = right", "def exact(self, roots):\n self.tensornet = Circuit(self.nspins + self.nmagnons)\n for n in range(self.nmagnons):\n self.tensornet.add(gates.X(n))\n for i, m in enumerate(range(self.nmagnons, 0, -1)):\n for n in range(self.nspins):\n self.tensornet.add(gates.Unitary(self._r_matrix_xxz(roots[i]), n + m - 1, n + m))\n return self.tensornet", "def simplify_plaquettes(self, model, dataset=None):\n for x in self.xvals():\n for y in self.yvals():\n p = self.get_plaquette(x, y)\n if p is not None:\n p.simplify_circuits(model, dataset)", "def reset(self):\n rng, seed = seeding.np_random()\n\n taxi_locs = [0, 1, 2, 3, 4]\n pass_dest_locs = [0, 1, 2, 3]\n\n taxi_row, taxi_column = rng.choice(taxi_locs, size=2, replace=True)\n pass_loc, 
dest_loc = rng.choice(pass_dest_locs, size=2, replace=self.replace)\n self.s = self.encode(taxi_row, taxi_column, pass_loc, dest_loc)\n return self.s" ]
[ "0.5215274", "0.5188521", "0.5135618", "0.5071695", "0.50464606", "0.50296235", "0.49938515", "0.49912375", "0.49813616", "0.497628", "0.4964148", "0.49436188", "0.49054703", "0.48990193", "0.48943797", "0.48934063", "0.4865207", "0.48245913", "0.48215088", "0.4818732", "0.47991067", "0.47872424", "0.47872424", "0.47816545", "0.47618523", "0.4759671", "0.47588813", "0.47488555", "0.47410396", "0.47406933", "0.4737861", "0.4735653", "0.47312078", "0.4727438", "0.47207984", "0.47187725", "0.47166505", "0.47068605", "0.46856725", "0.46822357", "0.46802393", "0.46738943", "0.46594378", "0.46580237", "0.46576658", "0.46524048", "0.46405834", "0.46368733", "0.46346867", "0.46344247", "0.4621608", "0.46211284", "0.4620595", "0.46173373", "0.46156", "0.45949277", "0.45915538", "0.45890567", "0.45872334", "0.4577837", "0.45764178", "0.45722622", "0.45706347", "0.456314", "0.45628184", "0.45605236", "0.4559221", "0.4559221", "0.45559177", "0.45537853", "0.4551115", "0.45445156", "0.45421332", "0.45277455", "0.45209113", "0.45188028", "0.45149088", "0.45142126", "0.45133343", "0.4511907", "0.45057532", "0.45018739", "0.44917133", "0.448987", "0.4486171", "0.44804305", "0.44794092", "0.44765422", "0.44765422", "0.44741058", "0.44689384", "0.44681802", "0.44608694", "0.4457899", "0.44574195", "0.44521973", "0.44492778", "0.44487348", "0.4448511", "0.44479686", "0.4447557" ]
0.0
-1
Construct a heap from a list of elements with priorities. Each element of the list must be in the form (Item, Priority).
def construct_heap(self, elems):
    for e in elems:
        self.n += 1
        self.A.append(e)
        self.pos[e[0]] = self.n
    for i in range(self.n // 2, 0, -1):
        self.combine(i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heap_sort(list):\n pass", "def build_heap(self, items):\n for key in items:\n self.insert(key)", "def __init__(self, list = []):\n # initialize empty heap\n self.heap = []\n\n # initialize heap with provided list\n for element in list:\n self.add(element)", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def build_heap(data):\n n = len(data) # elements 0 .. n-1\n swaps = []\n def swap(i, j):\n t = data[i]\n data[i] = data[j]\n data[j] = t\n swaps.append((i,j))\n def sift_down(i):\n # 3-way comparison to restore heap property to i\n new_i = i\n l = left(i); r = right(i)\n if l < n and data[l] < data[new_i]: new_i = l\n if r < n and data[r] < data[new_i]: new_i = r\n if not i == new_i:\n # i did not satsify heap property, swap and carry on down\n swap(i, new_i)\n sift_down(new_i)\n # starting from end, parent of n-1 is first that may break heap condition\n for i in range(parent(n - 1), -1, -1):\n sift_down(i)\n return swaps", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def heapify(x):\n pass", "def build_heap(arr):\n for i in range((len(arr)//2), -1, -1):\n heapify(arr,index=i, size=len(arr)-1)", "def build_heap(arr):\n for i in range(len(arr)-1, -1, -1):\n down_heapify(arr, len(arr), i)", "def buildHeap(A):\n n = len(A)\n for i in range(n/2-1, -1, -1):\n heapify(A, i, n)", "def construct_max_heap(self, lst):\n self.heap_list = lst\n #start compare node\n node = (len(self.heap_list)-2)/2\n while node >= 0:\n self.sift_down(node, len(self.heap_list)-1)\n node -= 1", "def build_heap(data: List[int]) -> List[Tuple[int, int]]:\n swaps: List[Tuple[int, int]] = []\n\n n = len(data)\n start = ceil(n/2) - 1\n for i in range(start, -1, -1):\n swaps = sink_down(i, data, swaps)\n\n return swaps", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def heap_sort(array):\n p = PriorityHeap(min == False)\n n = len(array)\n for i in range(n):\n p.push(array[i], array[i])\n for j in range(n - 1, -1, -1):\n item = p.pop().value\n array[j] = item\n return array", "def heap_sort(alist: list, key=None) -> list:\n newList = List()\n hp = BinaryHeap(func=key)\n\n for item in alist:\n hp.heappush(item)\n\n for _ in range(len(alist)):\n newList.append(hp.heappop())\n\n return newList", "def _create_priorities(self, pri):\n heaps = self.priorities\n heaps[pri] = MinBinaryHeap()", "def __init__(self, items=[]):\n self.set = dict((item, True) for item in items)\n self.heap = self.set.keys()\n heapq.heapify(self.heap)", "def testArbitraryItems(self):\n hd = HeapDict(size=2)\n item1 = self.PriorityItem(1.0, [None, 'Arbitrary item'])\n item2 = self.PriorityItem(2.0, {'Another item'})\n item3 = self.PriorityItem(3.0, (1, 'Third item'))\n item4 = self.PriorityItem(4.0, 0)\n hd.push(1, item1)\n hd.push(1, item3)\n hd.push(1, item2)\n hd.push(1, item4)\n 
self.assertEqual(hd.get_result(), {1: [item4, item3]})", "def sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def test_priority_que_success_priority_multiple(priority_queue):\n priority_queue.insert(20)\n priority_queue.insert(5)\n priority_queue.insert(100, 5)\n priority_queue.insert(10, 2)\n priority_queue.insert(50, 1)\n assert priority_queue._heap[0].value == 50", "def build_heap(self, arr):\n i = len(arr) // 2\n self.size = len(arr)\n self.heap_list = [-1] + arr[:]\n while i > 0:\n self.percolate_down(i)\n i = i - 1", "def build_max_heap(self, list_to_be_heap):\n self.heaplist = self.heaplist + list_to_be_heap\n self.currentsize = len(list_to_be_heap)\n\n # as it follow properties of complete binary tree, non leaf nodes will end to total size / 2\n index = self.currentsize // 2\n\n # > 0 : to ignore first element of the array which is 0..\n while index > 0:\n self.shift_item_down(index)\n index -= 1", "def heap_sort(l):\r\n h = SMinHeap()\r\n for el in l:\r\n h.push(el)\r\n sorted_list = [h.pop() for x in range(len(h.array))]\r\n return sorted_list", "def heapsort(self) -> Generator[T, None, None]:\n h = [e for e in self.priority_queue]\n while h:\n entry = heapq.heappop(h)[-1][0]\n if entry is not None:\n yield cast(T, entry)", "def make_heap(self, frequency):\n\n\n\t\t\tfor key in frequency:\n\t\t\t\tnode = self.HeapNode(key, frequency[key])#instaciamos un nodo con el valor y frecuencia\n\t\t\t\theapq.heappush(self.heap, node)#agregamos el nodo al priority queue", "def __init__(self, values=[]):\n self.priority_queue = {}\n if isinstance(values, list):\n try:\n for value, priority in values:\n self.insert(value, priority)\n except ValueError:\n raise TypeError(\"You need to tuplize your priorities\")\n else:\n raise TypeError(\"Put your items in a list\")", "def build_heap(data):\n # The following naive implementation just sorts the given sequence\n # using selection sort algorithm and saves the resulting sequence\n # of swaps. This turns the given array into a heap, but in the worst\n # case gives a quadratic number of swaps.\n #\n data_ = [0] * (len(data) + 1)\n data_[1:] = data\n n = len(data)\n swaps = []\n for i in reversed(range(n // 2 + 1)):\n if i == 0:\n break\n sift_down(data_, i, swaps)\n\n return swaps", "def build_heap(self, A: list):\n self.size = len(A)\n med = (self.size // 2) - 1 #Mid point of array\n for i in range(0, med + 1): #Reverse iteration\n self.heapify(A, med - i) #Reverse iteration", "def insert_elements_to_heap(heap, elements):\n for element in elements:\n heap.insert(element)", "def buildHeap(self, alist):\n i = len(alist) // 2\n self.currentSize = len(alist)\n self.heapList = [0] + alist[:]\n while (i>0):\n self.percDown(i)\n i = i - 1", "def getPriorityList(self):\r\n simple_list = [(0, self.s)]\r\n if self.priority == \"fib\":\r\n fib_heap = makefheap()\r\n fheappush(fib_heap, simple_list[0])\r\n return fib_heap\r\n return simple_list", "def build_max_heap(A):\n A.insert(0, len(A))\n for i in range(len(A)//2, 0, -1):\n max_heapify(A, i)", "def build_heap(data):\n size = len(data)\n for i in range(size//2, -1,-1):\n shiftDown(data, i)", "def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...", "def build_heap(data):\n # The following naive implementation just sorts the given sequence\n # using selection sort algorithm and saves the resulting sequence\n # of swaps. 
This turns the given array into a heap, but in the worst\n # case gives a quadratic number of swaps.\n #\n # TODO: replace by a more efficient implementation\n swaps = []\n for position in range(len(data)//2, 0, -1):\n curr = position - 1 \n while curr < len(data):\n # print(\"curr:\", curr, data[curr])\n left = 2*curr + 1\n right = 2*curr + 2\n min_index = curr\n if (left<len(data)) and (data[min_index] > data[left]):\n min_index = left\n if (right<len(data)) and (data[min_index] > data[right]):\n min_index = right\n \n if min_index != curr:\n swaps.append((curr, min_index))\n data[curr], data[min_index] = data[min_index], data[curr]\n curr = min_index\n print(data)\n else:\n # print(\"break==>\", data)\n break\n \n # print(data)\n\n return swaps", "def build_max_heap(a):\r\n for i in range(math.floor((len(a) - 1)/2), -1, -1):\r\n max_heapify(a, i)", "def __init__(self, items=None):\n\n if items is None:\n items = []\n self.set = dict((item, []) for item in items)\n self.heap = list(self.set.keys())\n hpq.heapify(self.heap)\n self.counter = itertools.count()", "def example_eight():\n a = []\n heapq.heappush(a, 5)\n heapq.heappush(a, 3)\n heapq.heappush(a, 7)\n heapq.heappush(a, 4)\n\n assert a[0] == heapq.nsmallest(1, a)[0] == 3\n\n print('Before:', a)\n a.sort()\n print('After: ', a)", "def __init__(self, value = None):\n if value == None:\n self.ar = []\n else:\n self.ar = list(value)\n self.n = (len(self.ar))\n\n start = self.n//2 - 1\n for i in range(start, -1, -1):\n self.heapify(i)", "def heapsort(A):\n \n buildHeap(A)\n for i in range(len(A)-1, 0, -1):\n A[0],A[i] = A[i],A[0]\n heapify(A, 0, i)", "def heapsort(iterable):\n queue = []\n\n [heapq.heappush(queue, item) for item in iterable]\n\n return [heapq.heappop(queue) for i in range(len(queue))]", "def heapify(seq):\n minheap = [0] + seq\n for i in range(len(seq)//2, 0, -1): #len(seq)//2 -= 1 to index 1\n minHeapify(minheap, i, seq)\n seq[:] = minheap[1:]\n return seq", "def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))", "def heap_sort(arr):\n if not isinstance(arr, list) or len(arr) == 0:\n return\n for i in range(len(arr) // 2 - 1, -1, -1):\n heapify(arr, i, len(arr))\n for i in range(len(arr) - 1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n heapify(arr, 0, i)", "def __init__(self, x):\n self.elements = x\n self._heapify()", "def build_max_heap(A):\r\n i = int((len(A)-2)//2)\r\n while i >= 0:\r\n max_heapify(A, i)\r\n i -= 1\r\n return A", "def __init__(self, it=tuple()):\n self.__heap = [list(x) for x in it]\n heapify(self.__heap)\n self.__position = {key: i for i, (_, key) in enumerate(self.__heap)}", "def build_heap(self, alist):\r\n if len(alist) > self.capacity:\r\n return False\r\n else:\r\n i = len(alist) // 2\r\n self.size = len(alist)\r\n self.items = [0] + alist[:] + [None]*(self.capacity+1-len(alist))\r\n while (i > 0):\r\n self.perc_down(i)\r\n i = i - 1\r\n return True", "def heapSort(lst):\n heap = createEmptyHeap(len(lst), less)\n for n in lst:\n add(heap, n)\n sortedLst = []\n while heap.size > 0:\n sortedLst.append(removeMin(heap))\n return sortedLst", "def heap_sort(arr):\n build_heap(arr)\n for i in range(len(arr) -1 , -1, -1):\n arr[i], arr[0] = arr[0], arr[i]\n \"\"\"Here we swap current element with root as it is already sorted and apply heapify. 
Index = 0 represents root\"\"\"\n heapify(arr, 0, i)", "def build_heap(self):\n n = int((len(self.array) / 2) - 1)\n\n while n >= 0:\n self.heapify_top_down(n)\n n -= 1", "def build_max_heap(x):\n def cond_func(state):\n x,c,p = state\n return jnp.logical_and(x[c] > x[p], c > 0)\n\n def body_func(state):\n x,c,p = state\n xc = x[c]\n xp = x[p]\n x = x.at[c].set(xp)\n x = x.at[p].set(xc)\n c = p\n p = (p - 1) >> 1\n return x, c, p\n\n def main_body(i, x):\n # parent index\n p = (i - 1) >> 1\n # heapify\n x, _, _ = lax.while_loop(cond_func, body_func, (x, i, p))\n return x\n\n return lax.fori_loop(1, len(x), main_body, x)", "def heap_sort(collection):\n n = len(collection)\n for i in range(n // 2 - 1, -1, -1):\n heapify(collection, i, n)\n for i in range(n - 1, 0, -1):\n collection[0], collection[i] = collection[i], collection[0]\n heapify(collection, 0, i)\n return collection", "def test_push_pop():\n\n q = PriorityQueue()\n\n # input list (obj, priority) should be reversed\n # in the priority_queue\n input_list = [((1), 9), ((2), 8), ((3), 7),\n ((4), 6), ((5), 5), ((6), 4),\n ((7), 3), ((8), 2), ((9), 1)]\n\n # insert the items in the queue\n for obj, p in input_list:\n q.push(obj, p)\n\n # pop the items into another list\n output = []\n while q._queue:\n output.append(q.pop())\n\n # make sure it lines up with expected result\n eq_(output, range(1, 10)[::-1])", "def heap_sort_nip(A):\n build_max_heap(A)\n res = []\n for i in range(len(A) - 1, 1, -1):\n res.insert(0, A[1])\n A[1], A[i] = A[i], A[1]\n del A[i]\n A[0] -= 1\n max_heapify(A, 1)\n A[:] = [A[1]] + res", "def heapSort(sequence):\n _buildHeap(sequence)\n for i in range(len(sequence) - 1, 0, -1):\n sequence[0], sequence[i] = sequence[i], sequence[0]\n _shiftDown(sequence, i - 1, 0)", "def create_prio_queue(img, descriptors):\n q = PriorityQueue()\n imgDesc = create_descriptor(img)\n for i, d in descriptors:\n dist = distance(imgDesc, d)\n q.put((dist, i))\n return q", "def test_insert_increases_size(sample_priorityq):\n assert len(sample_priorityq.heap_list) == 0\n sample_priorityq.insert([5, 1])\n assert len(sample_priorityq.heap_list) == 1\n sample_priorityq.insert([6, 2])\n assert len(sample_priorityq.heap_list) == 2", "def heapify(self, A: list, i: int):\n left = 2*i\n right = (2*i) + 1\n heapIndex = i\n\n if self.isMax == True:\n #Max heap\n if left < self.size and A[left] > A[heapIndex]:\n heapIndex = left #New largest\n if right < self.size and A[right] > A[heapIndex]:\n heapIndex = right #New largest\n else:\n #Min heap\n if left < self.size and A[left] < A[heapIndex]:\n heapIndex = left #New smallest\n if right < self.size and A[right] < A[heapIndex]:\n heapIndex = right #New smallest\n \n if heapIndex != i:\n A[i], A[heapIndex] = A[heapIndex], A[i] #Swap heap index with current iteration index\n self.heapify(A, heapIndex)", "def heap_sort(self, A):\n pass", "def heap_sort(self, A):\n pass", "def __init__(self):\n self.lower_q = [] # max heap\n self.higher_q = [] # min heap", "def __init__(self, iterable=None):\n self.heap = []\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def priority_queue():\n from src.priorityq import PriorityQueue\n priority_queue = PriorityQueue()\n return priority_queue", "def __init__(self):\n self.max_heap = [] # to contain left smaller half, or + 1\n self.min_heap = [] # to contain right bigger half", "def prepare_heap(graph, total_order):\n\n H = binheap(graph.Vertices, total_order=total_order)\n index = 0\n for vertice in H:\n vertice.heap_index = index\n index += 
1\n\n return H", "def build_max_heap(heap):\n\tfor j in range(heap.len//2, -1, -1):\n\t\tmax_heapify(heap, j)", "def priority_queue_full():\n from src.priorityq import PriorityQueue\n priority_queue = PriorityQueue()\n priority_queue.insert(15, 5)\n priority_queue.insert(12, 3)\n priority_queue.insert(11, 1)\n priority_queue.insert(6, 2)\n priority_queue.insert(17)\n priority_queue.insert(3)\n return priority_queue", "def __init__(self, seq=None, order='max'):\n if order == 'max':\n self._comparison = self._compare_max\n elif order == 'min':\n self._comparison = self._compare_min\n else:\n raise ValueError\n\n if not seq:\n self._heap_list = []\n else:\n self._heap_list = list(seq)\n self._build_heap()", "def heap_sort(A):\n build_max_heap(A)\n for i in range(len(A) - 1, 1, -1):\n A[1], A[i] = A[i], A[1]\n A[0] -= 1\n max_heapify(A, 1)\n del A[0]", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def heapsort(a):\r\n\r\n build_max_heap(a)\r\n\r\n for i in range(len(a) - 1, 0, -1):\r\n # exchange a[0] with a[i]\r\n a[0], a[i] = a[i], a[0]\r\n a.heap_size = a.heap_size - 1\r\n max_heapify(a, 0)\r\n\r\n a.heap_size = len(a)", "def heap_sort(unsorted):\n heap = BinaryMinHeap()\n\n # add items to heap\n for item in unsorted:\n heap.heap_push(item)\n\n # new list for things to go in\n sorted_list = [0] * (heap.get_size())\n\n # add sorted elements\n for position in range(heap.get_size()):\n sorted_list[position] = heap.pop_min()\n return sorted_list", "def main():\n first_element = PriorityQueueNode(\"first_element\", 1)\n second_element = PriorityQueueNode(\"second_element\", 2)\n third_element = PriorityQueueNode(\"third_element\", 3)\n fourth_element = PriorityQueueNode(\"fourth_element\", 4)\n fifth_element = PriorityQueueNode(\"fifth_element\", 5)\n\n my_priority_queue = PriorityQueue([first_element, second_element, third_element, fourth_element, fifth_element])\n\n print(f\"Initial priority queue: {my_priority_queue.list_elements()}\")\n my_priority_queue.build_max_heap()\n print(f\"My priority queue after building max_heap: {my_priority_queue.list_elements()}\")\n\n my_priority_queue.heap_increase_key(3, 9)\n print(f\"My priority queue after building heap_increase_key: {my_priority_queue.list_elements()}\")\n\n sixth_element = PriorityQueueNode(\"sixth_element\", 195)\n\n my_priority_queue.max_heap_insert(sixth_element)\n print(f\"My priority queue after building max_heap_insert: {my_priority_queue.list_elements()}\")", "def __init__(self, *values, cmp=DEFAULT_CMP):\n\n self.cmp = cmp\n\n if len(values) == 0:\n self.a = []\n return\n\n x = values[0]\n if not is_series(x):\n self.a = self.build_heap([x])\n return\n\n assert is_series(x)\n assert len(x) > 0\n\n self.a = self.build_heap(x)", "def __init__(self, *args):\n this = _libsbml.new_Priority(*args)\n try: self.this.append(this)\n except: self.this = this", "def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)", "def push_pop(self, item, priority):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Replace and return min item from heap, if any\n ...", "def example_seven():\n a = []\n heapq.heappush(a, 5)\n heapq.heappush(a, 3)\n heapq.heappush(a, 7)\n heapq.heappush(a, 4)\n\n print(heapq.heappop(a), heapq.heappop(a), heapq.heappop(a), heapq.heappop(a))", "def 
test_priority_que_success_priority(priority_queue):\n priority_queue.insert(10)\n priority_queue.insert(5)\n priority_queue.insert(100, 1)\n priority_queue.insert(10, 1)\n assert priority_queue._heap[0].value == 100", "def list_elements(self):\n return [(v.data, v.priority_key) for v in self.heap]", "def __init__(self):\n self.min_heap = []\n self.max_heap = []", "def build_max_heap(self, A):\n for i in range(len(A)/2, -1, -1):\n self.max_heapify(A, i)", "def build_max_heap(self, A):\n for i in range(len(A)/2, -1, -1):\n self.max_heapify(A, i)", "def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)", "def __init__(self, collection: Optional[list]=None):\n if isinstance(collection, list):\n super().__init__(collection=[])\n # If a valid collection is provided,\n # construct the min heap with the collection\n for value in collection:\n self.insert(value)\n elif collection is None:\n super().__init__(collection=[])\n else:\n raise TypeError('Argument \\'collection\\' must be of type list.')", "def test_pop_decreases_size(sample_priorityq):\n for i in range(5):\n sample_priorityq.insert([i, i + 3])\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 4\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 3\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 2", "def __init__(self):\n # max_heap stores smaller half\n # min_heap stores larger half\n self.heaps = ([], [])", "def __init__(self):\n self.minheap = []\n self.maxheap = []", "def __init__(self):\n self.max_heap = list()\n self.min_heap = list()", "def manage_heap(heap, coordinates, distance):\n\tif distance > SUN_DISTANCE:\n\t\tif len(heap) < k:\n\t\t\theap.append((distance, coordinates))\n\t\t\tif len(heap) == k:\n\t\t\t\theapq._heapify_max(heap)\n\t\telif distance < heap[0][0]:\n\t\t\theapq._heappushpop_max(heap, (distance, coordinates))", "def test_priority_que_success_multiple_empty(priority_queue):\n priority_queue.insert(15)\n priority_queue.insert(13, 1)\n assert (priority_queue._heap[0].value,\n priority_queue._heap[0].priority,\n priority_queue._heap[1].value) == (13, 1, 15)", "def heap_sort(self, data, draw, speed):\n \n # building max-heap\n # first index of a non-leaf node → len(data)//2 - 1 \n for i in range(len(data) // 2 - 1, -1, -1):\n self.heapify(data, len(data), i)\n \n # extract elements (remove root and heapify)\n for i in range(len(data)-1, 0, -1):\n \n # swap root with last element\n data[i], data[0] = data[0], data[i]\n \n # heapify root\n self.heapify(data, i, 0)\n draw(data, [\"Orange\" if x == i or x == self.largest else \"#a871e3\" for x in range(len(data))])\n time.sleep(speed)", "def max_heapify(self, i):\n largest, left_index, right_index = i, 2*i+1, 2*i+2\n current_length = self.heap_size\n\n if (left_index < current_length) and (self.heap[left_index].priority_key > self.heap[largest].priority_key):\n largest = left_index\n\n if (right_index < current_length) and (self.heap[right_index].priority_key > self.heap[largest].priority_key):\n largest = right_index\n\n if largest != i:\n self.heap[largest], self.heap[i] = self.heap[i], self.heap[largest]\n self.max_heapify(largest)\n return self.heap", "def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)", "def heapify(array):\n # Start by sifting down the first parent 
node\n n = len(array)\n node = (n - 2) // 2\n\n # Sift down all nodes, finishing with the root\n while node >= 0:\n sift_down(array, node, n)\n node -= 1", "def heap_push_max(heap, item):\n heap.append(item)\n heapq._siftdown_max(heap, 0, len(heap)-1)", "def change_priority(self, elem, prio):\n pos = self.pos[elem]\n currPrio = self.A[pos][1]\n self.A[pos] = (elem, prio)\n if self.cmpFn(prio, currPrio):\n self.insert_loop(pos, pos // 2) # Up heapify\n else:\n self.combine(pos) # Down heapify" ]
[ "0.6715845", "0.6644555", "0.66232336", "0.6562983", "0.6562983", "0.6440951", "0.64229244", "0.64208883", "0.64108974", "0.63691944", "0.6351629", "0.63361603", "0.632387", "0.6306449", "0.6306449", "0.62739", "0.62633675", "0.62222654", "0.6173713", "0.6163355", "0.61413926", "0.6126718", "0.61000144", "0.6086264", "0.6077235", "0.607588", "0.60739946", "0.60594726", "0.60227156", "0.6021205", "0.60189664", "0.59884334", "0.5938065", "0.59224546", "0.5910738", "0.5903463", "0.58779675", "0.5868437", "0.5864319", "0.58638173", "0.5854473", "0.58531755", "0.5848573", "0.583748", "0.5827706", "0.5825675", "0.5821898", "0.578059", "0.5745481", "0.5740721", "0.567334", "0.5668849", "0.5652272", "0.5640275", "0.56401336", "0.5630376", "0.5583871", "0.55764425", "0.5575312", "0.55665237", "0.55664915", "0.5564297", "0.5564297", "0.5563892", "0.5560722", "0.5555503", "0.5550977", "0.5532965", "0.55269694", "0.5516804", "0.5513786", "0.55019027", "0.5494289", "0.5480641", "0.5474243", "0.5458129", "0.5453677", "0.54486877", "0.54393494", "0.5438405", "0.5436442", "0.54254746", "0.54161376", "0.54082", "0.5400646", "0.5400646", "0.5395616", "0.5393297", "0.5392562", "0.5384247", "0.5382745", "0.5373526", "0.5371495", "0.5360267", "0.5351008", "0.53454655", "0.5342498", "0.5339118", "0.53365946", "0.533274" ]
0.6710219
1
Gets the first item of the heap (but doesn't remove it).
def get_first(self):
    return self.A[1][0] if self.n > 0 else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peek_first(self):\n if len(self._heap) == 0:\n return None\n else:\n return self._heap[0]", "def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...", "def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r", "def peek(self):\n if self.is_empty():\n raise ValueError(\"Heap is empty\")\n return self._heap[0]", "def get_min(self) -> object:\n if self.is_empty()==True:\n return None\n return self.heap.get_at_index(0)", "def peek(self):\n heaps = self.priorities\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n if len(heap_list) == 0:\n self._remove_key()\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n return heap_list[0]", "def poll(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n item = self.heap[0]\n self.heap[0] = self.heap.pop()\n self.heapify_down()\n return item", "def peek(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n return self.heap[0]", "def findmin(self):\n return self.heap[0] if len(self.heap) > 0 else None", "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r", "def extractmin(self):\n if len(self.heap) == 0: \n return None\n i = self.heap[0]\n last = self.heap[-1]\n del self.heap[-1]\n if len(self.heap) > 0:\n self.siftdown(last, 0)\n return i", "def first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._head._element", "def first(self):\n if self.is_empty():\n raise Empty(\"Queue undeflow.\")\n return self._head._element", "def first(self):\r\n if self.is_empty():\r\n raise Empty(\"Queue is empty\")\r\n return self._head._element", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n return res", "def get_min(self) -> object:\n if not self.is_empty():\n return self.heap.get_at_index(0)\n else:\n raise MinHeapException", "def extractMin(self):\n if not self.heap:\n raise IndexError(\"there is no root\")\n elif len(self.heap) < 2:\n return self.heap.pop()\n else:\n self.heap[0], oldMin = self.heap.pop(), self.heap[0]\n self._shiftDown()\n return oldMin", "def peek(self):\n if self.heap:\n return self.heap[0]\n else:\n raise IndexError(\"there is no root\")", "def pop_smallest(self):\n smallest = heapq.heappop(self.heap)\n del self.set[smallest]\n return smallest", "def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop from an empty heap')", "def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._data[self._front]", "def first(self):\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element # front aligned with head of list", "def first(self):\n 
if self.is_empty():\n raise Empty(\"Queue is empty.\")\n head = self._tail._next\n return head._element", "def first(self):\n if self.head is None:\n raise Exception(\"nothing in queue\")\n return self.head.value", "def pop(self):\n if len(self.heap)==0:\n raise ValueError(\"Tried popping empty heap\")\n return heapq.heappop(self.heap)[1]", "def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]", "def peek(self):\n if self.isEmpty():\n raise Exception(\"Stack underflow\") # Nothing to peek at\n return self.first.Item # most recently added item", "def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum", "def peek(self):\n if self.is_empty():\n raise ValueError('Queue underflow')\n return self.first.item", "def peek(self):\n if self.isEmpty(): \n raise Exception(\"Queue underflow\")\n return self._q[self._first]", "def top(self):\n if len(self.heap) > 0:\n return self.heap[0]\n\n return None", "def top(self):\n if len(self.heap) > 0:\n return self.heap[0]\n\n return None", "def pop(self):\n try:\n result = self._heap_list.pop(0)\n except IndexError:\n raise IndexError(\"Cannot pop from an empty heap.\")\n self._build_heap()\n return result", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def min(self):\n return self.heap[1]", "def first(self) -> Optional[T]:\n if len(self.entry_finder) == 0:\n return None\n for (_, _, (item,)) in self.priority_queue:\n if item is not None:\n return cast(T, item)\n return None", "def first(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.root().element().value()", "def extractMinimum(self):\n\n return self.heap[1]", "def min(self):\n return tuple(self.__heap[0])", "def peek(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n return self.priority_queue[nextkey][0]\n else:\n raise IndexError(\"There's nothing in your queue\")", "def peek(self):\n return self.m * self.heap[0] if self.heap else None", "def heappop(heap):\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n Heap.siftup(heap, 0)\n return returnitem\n return lastelt", "def get_min(h: Heap) -> Node:\n prev, curr = _min(h)\n return curr", "def pop(self):\n heap = self.heap\n if len(heap) < 1:\n return None\n\n ret_val = self.front()\n self.__delete(0)\n return ret_val", "def pop(self):\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')", "def peek_up(self):\n return self.heap[0]", "def top(heap):\n return heap[_root()]", "def top(self): # O(1)\n if not self.queue:\n return None\n return self.queue[0]", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise 
Exception('Trying to pop from empty PriorityQueue.')", "def _find_min(self):\n if self.is_empty(): # is_empty inherited from base class\n raise Empty('Priority queue is empty')\n small = self._data.first()\n walk = self._data.after(small)\n while walk is not None:\n if walk.element() < small.element():\n small = walk\n walk = self._data.after(walk)\n return small", "def getItem(self):\n with self.lock:\n if self.isEmpty():\n return None\n else:\n returnval = heapq.heappop(self.ItemList)\n self.ItemHashList.pop(returnval[1])\n return returnval", "def firstElement(self):\n return self.top()", "def peek(self):\r\n if self.size():\r\n return self.queue[0]\r\n else:\r\n return None", "def peek_front(self):\n\n if self.items:\n return self.items[0]\n return None", "def pop(self):\n temp = self.elements.pop(0)\n self._heapify()\n return temp", "def extract_min(self):\n if self.is_empty():\n raise ValueError(\"Priority queue is empty\")\n\n edge_tuple = heapq.heappop(self.__heap)\n ew = edge_tuple[1]\n return ew.edge()", "def pop(self) -> tuple:\n item = self.__heap.popleft()\n\n if len(self) > 1:\n self.__heap.appendleft(self.__heap.pop())\n self.__sift_down(0)\n\n return item", "def front(self):\n return self.queue[0] if not self.empty() else None", "def first(self):\n return self.deque[0]", "def pop_min(self):\n if self.get_size() == 0:\n return None\n\n # put minimum item at the end\n self.swap(0, len(self.table) - 1)\n\n # and remove it from the list;\n item = self.table.pop()\n\n # then fix new root\n self.percolate_down(0)\n return item", "def peek_first(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n return self.head.data", "def pop(self) -> T:\n while self.priority_queue:\n _, _, (item,) = heapq.heappop(self.priority_queue)\n if item is not None:\n del self.entry_finder[item] # type: ignore\n return cast(T, item)\n raise KeyError('pop from an empty priority queue')", "def extract_min(self):\r\n if self.is_empty():\r\n return None\r\n min_elem = self.heap_array[0]\r\n aux_elem = self.heap_array.pop()\r\n\r\n if self.is_empty() == False:\r\n self.heap_array[0] = aux_elem\r\n\r\n current_index = 0\r\n left_child_index = (2 * current_index) + 1\r\n current_value = self.heap_array[current_index]\r\n\r\n while left_child_index < len(self.heap_array): # loop that will repeat until no violation of the minheap properties exist\r\n current_min = current_value\r\n\r\n for i in range(2): # this loop is in place so that both children are compared and the smaller of the two is chosen \r\n if (left_child_index + i) > len(self.heap_array)-1: # condition to avoid out of bounds\r\n continue\r\n else:\r\n if int(self.heap_array[left_child_index + i]) < int(current_min): # if child is smaller than parent\r\n current_min = self.heap_array[left_child_index + i ] # set current minimum value\r\n current_min_index = left_child_index + i # and cureent minimim index( index where current minimum value is found )\r\n if current_min == current_value: # if no property is broken (in this case, the parent is actually less than its' children)\r\n break\r\n else: # if propert is broken\r\n self.heap_array[current_index], self.heap_array[current_min_index] = self.heap_array[current_min_index], self.heap_array[current_index] # swap the elements \r\n current_index = current_min_index\r\n left_child_index = int((2 * current_index) + 1)\r\n return min_elem", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! 
The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def pop(self) -> Optional[T]:\n try:\n elem = heapq.heappop(self._heap).val\n self._unique_values.remove(elem)\n except IndexError:\n return None\n return elem", "def pop(self):\n _, _, obj = heapq.heappop(self._heap)\n return obj", "def pop(self):\r\n try:\r\n key = heapq.heappop(self.heap)\r\n return self.elements[key]\r\n except:\r\n raise StopIteration", "def remove_min(self) -> Optional[T]:\n if self._array == []:\n return None\n else:\n # Remove top node\n value = self._array[0]\n self._array = self._array[1:]\n # If nodes remaing in the min heap...\n if self._array:\n # Move end node to the top\n end_node = self._array.pop()\n self._array = [end_node] + self._array\n # Rebuild the heap (heapify)\n self.__build()\n # Return the top node\n return value", "def first(self):\r\n return self.__head", "def delete_min(self):\n #The length is 1 because the heap list was initialized with 0\n if len(self.heap_list) == 1:\n return \"Empty heap.\"\n\n #Store the min value of the heap\n top = self.heap_list[1]\n\n #Move the last value of the heap to the top\n self.heap_list[1] = self.heap_list[self.current_size]\n\n #Pop the last value from the heap (that was moved to the top)\n *self.heap_list, _ = self.heap_list\n\n # Decrease the size of the heap\n self.current_size -= 1\n\n #Move down the top value to the appropriate position (following the definition of a min heap)\n #The value is at index 1 since the heap list was initialized with 0) \n self.sift_down(1)\n\n #Return the min value of the heap\n return top", "def pop(self):\r\n if self.point_to_head.chi == None:\r\n return None\r\n first_element = self.point_to_head.chi.val\r\n self.point_to_head.chi = self.point_to_head.chi.chi\r\n return first_element", "def peek(self):\n return self.list.head", "def first(self):\n return self.__head", "def heappop(heap):\n lastelt = heap.pop()\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt", "def dequeue(self): # total O(1)\n topItem = self._queue[self._start] #O(1)\n self._queue[self._start] = None #O(1)\n self._start = (self._start+1)% self._capacity #O(1)\n self._size -= 1 #O(1)\n return topItem #O(1)", "def peek(self):\n # TODO: Return top item, if any\n print('self.is_empty()', self.is_empty())\n if self.is_empty():\n return None\n print('self.top', self.list.head.data)\n return self.list.head.data", "def _pop_first(self) -> Any:\n if self.is_empty():\n raise IndexError\n return self.pop(0)", "def pop(self):\n\n while self.heap:\n# #logger_cagada.debug(\"elem de heap %s\" % self.heap)\n priority, node = self.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n return priority, node\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n return heappop(self.priority_queue)[1]", "def peek(self):\n pop = self.list_x[0]\n return pop", "def getFront(self):\n\t\tfront = self.queue[self.front]\n\t\treturn front\n\t\tpass", "def remove_front(self):\n\n if self.items:\n return self.items.pop(0)\n return None", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._element # front aligned with head of list", "def first(self):\n return self.head and self.head.value or None", "def pop(self):\n if self.n == 0:\n raise ValueError(\"Heap is empty\")\n value = self.ar[0]\n self.n -= 1\n self.ar[0] 
= self.ar[self.n]\n self.heapify(0)\n return value", "def pop(self):\n\n def sub_pop():\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heap = heaps[keys]\n pop = heap.pop()\n return pop\n\n try:\n val = sub_pop()\n except IndexError:\n self._remove_key()\n val = sub_pop()\n\n return val", "def remove(self):\n max_item = self.heaplist[1]\n self.heaplist[1] = self.heaplist[self.currentsize]\n self.currentsize -= 1\n self.heaplist.pop()\n self.shift_item_down(1)\n return max_item", "def peek(self):\n if self.is_empty():\n return None\n return self.list.head.data", "def pop(self):\n if self.size is 0:\n print \"Heap is empty\"\n return\n # Swap the top most element with the last one\n self._swap(0, self.size - 1)\n poppedKey = self.queue[self.size - 1]\n # Reduce the size of the queue\n self.size -= 1\n # Rebalance\n self._heapify(0)\n return poppedKey", "def peek(self):\n\t\tif not self.is_empty():\n\t\t\treturn self.items[-1]\n\t\telse:\n\t\t\treturn None", "def peek(self):\n # TODO: Return top item, if any\n print(\"self.list P\", self.list)\n print(\"length\", self.length())\n if self.is_empty():\n return None\n else:\n return self.list[self.length()-1]\n # do n-1\n # return self.list[-]", "def pop(self):\n if not self.empty():\n self.size -= 1\n return heapq.heappop(self.queue)\n else:\n return None", "def peek(self):\n if not self.empty():\n return self.queue[-1]\n return None", "def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min", "def peek(self):\n if self._size == 0:\n raise EmptyQueue('pop from empty queue')\n priority = self._heap[0].priority\n value = self._heap[0].value\n key = self._heap[0].key\n return priority, value, key", "def peek(self):\r\n return self.queue[0]", "def peek(self):\r\n return self.queue[0]", "def peek(self):\n if self.count() <= 0:\n raise ValueError('Cannot peek at value that does not exist')\n return self.items[1]", "def front(self):\n heap = self.heap\n if len(heap) == 0:\n return None\n item = heap[0]\n to_node = item[self.TO_NODE]\n from_node = item[self.FROM_NODE]\n value = item[self.VALUE]\n return from_node, to_node, value", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def peek(self):\n\n if self.is_empty():\n return None\n\n return self._list[-1]" ]
[ "0.82751524", "0.82521206", "0.8044901", "0.8024893", "0.8007148", "0.79513764", "0.7847617", "0.7768226", "0.7648149", "0.76431507", "0.7619455", "0.7604687", "0.75944394", "0.7558453", "0.7549099", "0.7532435", "0.74502736", "0.74292153", "0.74141717", "0.7407734", "0.7378194", "0.7361816", "0.73446274", "0.7293349", "0.7288812", "0.72768843", "0.7246906", "0.72462976", "0.7228068", "0.72124404", "0.72072184", "0.72072184", "0.7186108", "0.717752", "0.7169406", "0.71375215", "0.70896935", "0.7078827", "0.7065331", "0.70549214", "0.70520186", "0.70375365", "0.703281", "0.70327085", "0.7025286", "0.70161116", "0.7013431", "0.70018154", "0.69968146", "0.6953256", "0.69414353", "0.69380456", "0.6905407", "0.6896072", "0.68922204", "0.6890994", "0.68897825", "0.6886368", "0.6880442", "0.6878718", "0.6868389", "0.68659216", "0.6854489", "0.68414056", "0.6839987", "0.6828428", "0.6811464", "0.6801029", "0.6786982", "0.67865205", "0.67801154", "0.6778615", "0.67757416", "0.67743886", "0.6771267", "0.67678744", "0.6766589", "0.6755487", "0.6721042", "0.67178977", "0.6716974", "0.67114776", "0.67110145", "0.67058104", "0.6705637", "0.6698831", "0.6683255", "0.6677264", "0.66667247", "0.66662866", "0.6662916", "0.6662054", "0.6661644", "0.66573435", "0.6654127", "0.6653857", "0.6653857", "0.6651049", "0.66475636", "0.66371036", "0.6632477" ]
0.0
-1
Gets the first item of the heap and removes it.
def delete_first(self):
    if self.n == 0:
        return None
    first = self.A[1]
    self.n -= 1
    last = self.A.pop()
    if self.n > 0:
        self.A[1] = last
        self.pos[last[0]] = 1
        self.combine(1)
    return first[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r", "def poll(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n item = self.heap[0]\n self.heap[0] = self.heap.pop()\n self.heapify_down()\n return item", "def remove(self):\n max_item = self.heaplist[1]\n self.heaplist[1] = self.heaplist[self.currentsize]\n self.currentsize -= 1\n self.heaplist.pop()\n self.shift_item_down(1)\n return max_item", "def delMin(self):\n retval = self.heapList[1]\n self.heapList[1] = self.heapList[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapList.pop()\n self.percDown(1)\n return retval", "def pop(self):\n try:\n result = self._heap_list.pop(0)\n except IndexError:\n raise IndexError(\"Cannot pop from an empty heap.\")\n self._build_heap()\n return result", "def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop from an empty heap')", "def remove(self): \n \n popped = self.Heap[self.FRONT] \n self.Heap[self.FRONT] = self.Heap[self.size] \n self.size-= 1\n self.min_heapify(self.FRONT) \n return popped", "def pop(self):\n if len(self.heap)==0:\n raise ValueError(\"Tried popping empty heap\")\n return heapq.heappop(self.heap)[1]", "def pop(self):\n _, _, obj = heapq.heappop(self._heap)\n return obj", "def pop(self):\n heap = self.heap\n if len(heap) < 1:\n return None\n\n ret_val = self.front()\n self.__delete(0)\n return ret_val", "def pop_smallest(self):\n smallest = heapq.heappop(self.heap)\n del self.set[smallest]\n return smallest", "def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! 
The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def pop(self):\n temp = self.elements.pop(0)\n self._heapify()\n return temp", "def heappop(heap):\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n Heap.siftup(heap, 0)\n return returnitem\n return lastelt", "def pop(self):\n return heapq.heappop(self.heap)", "def pop(self):\r\n try:\r\n key = heapq.heappop(self.heap)\r\n return self.elements[key]\r\n except:\r\n raise StopIteration", "def remove(self):\n # non empty heap: get first element\n if len(self.heap) > 0:\n removed = self.heap[0]\n\n # empty heap: return None\n else:\n return None\n\n # heap with one element: remove it and return\n if len(self.heap) == 1:\n return self.heap.pop()\n\n # put last element on the begining of the heap\n self.heap[0] = self.heap.pop()\n\n # descend new root while needed\n index, leftChild, rightChild = self.getChilds(0)\n while (leftChild < self.size() and \\\n self.heap[index] < self.heap[leftChild]) or \\\n (rightChild < self.size() and \\\n self.heap[index] < self.heap[rightChild]):\n\n # swap smallest child with parent\n if rightChild == len(self.heap) or \\\n self.heap[leftChild] > self.heap[rightChild]:\n\n # swap with left child and set current node as left child\n self.swap(index, leftChild)\n index, leftChild, rightChild = self.getChilds(leftChild)\n\n else:\n # swap with right child and set current node as right child\n self.swap(index, rightChild)\n index, leftChild, rightChild = self.getChilds(rightChild)\n\n # return removed node\n return removed", "def pop(self) -> tuple:\n item = self.__heap.popleft()\n\n if len(self) > 1:\n self.__heap.appendleft(self.__heap.pop())\n self.__sift_down(0)\n\n return item", "def pop(self):\n\n while self.heap:\n# #logger_cagada.debug(\"elem de heap %s\" % self.heap)\n priority, node = self.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n return priority, node\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # put last leaf to root\n self.rank[x] = 1\n self.down(1) # maintain heap order\n return root", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # move the last leaf\n self.rank[x] = 1 # to the root\n self.down(1) # maintain heap order\n return root", "def pop(self):\n if self.n == 0:\n raise ValueError(\"Heap is empty\")\n value = self.ar[0]\n self.n -= 1\n self.ar[0] = self.ar[self.n]\n self.heapify(0)\n return value", "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n 
pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min", "def delete_min(self):\n #The length is 1 because the heap list was initialized with 0\n if len(self.heap_list) == 1:\n return \"Empty heap.\"\n\n #Store the min value of the heap\n top = self.heap_list[1]\n\n #Move the last value of the heap to the top\n self.heap_list[1] = self.heap_list[self.current_size]\n\n #Pop the last value from the heap (that was moved to the top)\n *self.heap_list, _ = self.heap_list\n\n # Decrease the size of the heap\n self.current_size -= 1\n\n #Move down the top value to the appropriate position (following the definition of a min heap)\n #The value is at index 1 since the heap list was initialized with 0) \n self.sift_down(1)\n\n #Return the min value of the heap\n return top", "def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]", "def heap_pop(self, value):\n if value is None or self.get_size() == 0:\n return\n\n if self.find(value) is not None:\n # end of list\n position = self.find(value)\n last = self.get_size() - 1\n\n # pop element and percolate down\n self.swap(position, last)\n self.table.pop()\n self.percolate_down(position)\n return", "def delete(self):\n first = self.data[0]\n self.data.pop(0)\n self.size = self.size - 1\n return first", "def pop(self) -> T:\n while self.priority_queue:\n _, _, (item,) = heapq.heappop(self.priority_queue)\n if item is not None:\n del self.entry_finder[item] # type: ignore\n return cast(T, item)\n raise KeyError('pop from an empty priority queue')", "def pop(self) -> Optional[T]:\n try:\n elem = heapq.heappop(self._heap).val\n self._unique_values.remove(elem)\n except IndexError:\n return None\n return elem", "def pop(self):\n if not self.empty():\n self.size -= 1\n return heapq.heappop(self.queue)\n else:\n return None", "def pop(self):\r\n while self.pq:\r\n priority, count, task = heapq.heappop(self.pq)\r\n if task is not self.REMOVED:\r\n del self.entry_finder[task]\r\n return task\r\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n self.data[0], self.data[-1] = self.data[-1], self.data[0]\n result = self.data.pop()\n self.heapify_down(0)\n return result", "def heappop(heap):\n lastelt = heap.pop()\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt", "def remove_min(self) -> Optional[T]:\n if self._array == []:\n return None\n else:\n # Remove top node\n value = self._array[0]\n self._array = self._array[1:]\n # If nodes remaing in the min heap...\n if self._array:\n # Move end node to the top\n end_node = self._array.pop()\n self._array = [end_node] + self._array\n # Rebuild the heap (heapify)\n self.__build()\n # Return the top node\n return value", "def pop(self) -> Article:\n return heapq.heappop(self.heap)", "def pop(self):\n if self.size is 0:\n print \"Heap is empty\"\n return\n # Swap the top most element with the last one\n self._swap(0, self.size - 1)\n poppedKey = self.queue[self.size - 1]\n # Reduce the size of the queue\n self.size -= 1\n # Rebalance\n self._heapify(0)\n return poppedKey", "def remove(self):\n # non empty heap: get first element\n 
if len(self.heap) > 0:\n removed = self.heap[0]\n\n # empty heap: return None\n else:\n return None\n\n # heap with one element: remove it and return\n if len(self.heap) == 1:\n return self.heap.pop()\n\n # put last element on the begining of the heap\n self.heap[0] = self.heap.pop()\n\n # descend new root while needed\n index = 0\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n while (leftChild < len(self.heap) and \\\n self.heap[index][1] > self.heap[leftChild][1]) or \\\n (rightChild < len(self.heap) and \\\n self.heap[index][1] > self.heap[rightChild][1]):\n\n # swap smallest child with parent\n if rightChild == len(self.heap) or \\\n self.heap[leftChild][1] < self.heap[rightChild][1]:\n\n # swap with left child\n swap = self.heap[index]\n self.heap[index] = self.heap[leftChild]\n self.heap[leftChild] = swap\n\n # update indexes\n index = leftChild\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n\n else:\n\n # swap with right child\n swap = self.heap[index]\n self.heap[index] = self.heap[rightChild]\n self.heap[rightChild] = swap\n\n # update indexes\n index = rightChild\n leftChild = (2 * index) + 1\n rightChild = (2 * index) + 2\n\n # return removed node\n return removed", "def remove_min(self):\r\n try:\r\n if self.is_empty():\r\n raise \"List is Empty\"\r\n \r\n self.swap(0,len(self._data)-1) \r\n element = self._data.pop() # remove the value from list.\r\n self._heapify_after_remove(0) # heapify the list\r\n return element._key, element._value\r\n \r\n except Exception, e:\r\n print \"Error occurred in HeapDistance: remove_min\", e\r\n print traceback.print_exc(e)", "def deleteMin(self):\n heap = self._heap\n position = self._position\n\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n\n if heap:\n node = heap[0]\n # grab last node in PQ to root and sink it down appropriately\n heap[0] = end\n position[end.key] = 0\n self._sink(0)\n else:\n node = end\n del position[node.key] # delete index from position dict\n return node.key, node.value", "def getItem(self):\n with self.lock:\n if self.isEmpty():\n return None\n else:\n returnval = heapq.heappop(self.ItemList)\n self.ItemHashList.pop(returnval[1])\n return returnval", "def pushpop(self, item):\n return heapq.heappushpop(self.heap, item)", "def pop_first(self):\n self.pop_item(0)", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty')\n self._swap(0, len(self) - 1)\n item = self._data.pop()\n self._down_heap(0)\n return (item._key, item._value)", "def remove_front(self):\n\n if self.items:\n return self.items.pop(0)\n return None", "def pop(self):\n\n def sub_pop():\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heap = heaps[keys]\n pop = heap.pop()\n return pop\n\n try:\n val = sub_pop()\n except IndexError:\n self._remove_key()\n val = sub_pop()\n\n return val", "def dequeue(self):\n if self._size == 0:\n raise EmptyQueue('dequeue from empty queue')\n priority = self._heap[0].priority\n value = self._heap[0].value\n key = self._heap[0].key\n del self._index[key]\n item = self._heap.pop()\n self._size -= 1\n if self._size == 0:\n return priority, value, key\n self._heap[0] = item\n self._index[item.key] = 0\n self._sift_down(0)\n return priority, value, key", "def delete_first(self):\n self.deque.pop(0)", "def pop(self):\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key 
= heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n return res", "def popitem(self):\n return self.pop(0)", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def pop(self):\n (cost, node) = heapq.heappop(self.heap)\n self.states.pop(node.state, None) # remove state\n return node", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def pop(self):\n result = self.peek()\n self.item_count -= 1\n index = 1\n mem_size = len(self.items)\n while True:\n left = index * 2\n right = left + 1\n if self.is_invalid_index(left) and self.is_invalid_index(right):\n # Neither child exists, so delete this item.\n self.mark_invalid_index(index)\n return result\n elif self.is_invalid_index(right):\n # Right child does not exist, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n elif self.is_invalid_index(left):\n # Left child does not exist, so bubble up from right.\n self.items[index] = self.items[right]\n index = right\n elif self.is_heap_order(self.items[left], self.items[right]):\n # Left child should be on top, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n else:\n # Right child should be on top, so bubble up from right.\n self.items[index] = self.items[right]\n index = right", "def peek(self):\n heaps = self.priorities\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n if len(heap_list) == 0:\n self._remove_key()\n keys = heaps.keys()\n key = min(keys)\n heap = heaps[key]\n heap_list = heap.heap_list\n\n return heap_list[0]", "def pop(self):\n priority, value = heappop(self._heap)\n return (-1 * priority, value)", "def remove_min(self) -> object:\n if self.is_empty():\n raise MinHeapException\n return\n parent_index=0\n parent=self.get_min()\n #parent=5\n #print(parent)\n #print(self)\n self.heap.swap(parent_index,self.heap.length()-1)\n self.heap.pop()\n if self.is_empty():\n return parent\n min_child=self.find_min_child(1,2)\n while min_child!=None:\n if self.heap.get_at_index(min_child)>self.heap.get_at_index(parent_index):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n if parent_index==None:\n break\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n return parent", "def pop(self):\n priority, key = self.__heap[0]\n self.__swap(0, len(self.__heap) - 1)\n del self.__position[key]\n del self.__heap[-1]\n\n if self:\n self.__bubble_down(0)\n\n return priority, key", "def __delitem__(self, key):\n\t\ttry:\n\t\t\tdel self.heap[[item == key for _, item in 
self.heap].index(True)]\n\t\texcept ValueError:\n\t\t\traise KeyError(str(key) + \" is not in the priority queue\")\n\t\theapq.heapify(self.heap)", "def dequeue(self): # total O(1)\n topItem = self._queue[self._start] #O(1)\n self._queue[self._start] = None #O(1)\n self._start = (self._start+1)% self._capacity #O(1)\n self._size -= 1 #O(1)\n return topItem #O(1)", "def pop_min(self):\n if self.get_size() == 0:\n return None\n\n # put minimum item at the end\n self.swap(0, len(self.table) - 1)\n\n # and remove it from the list;\n item = self.table.pop()\n\n # then fix new root\n self.percolate_down(0)\n return item", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def delete_first(self):\n if self._size == 0:\n raise Empty('Dequeue is empty')\n return self._delete_node(self._head._next)", "def pop(self):\n self.queue.insert(len(self.queue), self.queue[0])\n self.queue.remove(self.queue[0])\n return self.queue.pop()", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # return the saved item", "def remove_min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def remove_min(self):\n if self._size == 1: # Only root node in heap\n return self._delete_node(self.root())\n min_node = self._array[0] # Root node has min value\n last = self._array[self._size-1] # Bottom-right-most node\n self._swap(min_node, last) # Move last node to root\n element = self._delete_node(min_node) # Delete root\n self._downheap(last) # Down-heap bubble last node\n if self._size == self._N//4 and self._N > BinaryTree.DEFAULT_CAPACITY:\n self._resize_array(self._N // 2) # Halve size of array\n return element", "def pop(self):\n if self._size > 0:\n elem = self.first.data\n self.first = self.first.next\n self._size = self._size - 1\n return elem\n \n raise IndexError('The queue is empty! 
')", "def remove(self):\r\n if self.first() is not None:\r\n self.dec_size()\r\n self.set_first(self.first().next())\r\n if self.size() == 0: # when there are no more elements in the list,\r\n self.__last = None # remove the pointer to the last element\r", "def pop(self):\n return heapq.heappop(self.array)", "def pop(self):\r\n\r\n if not self.is_empty():\r\n\r\n half_cap = self._capacity // 2\r\n item = self._data[self._size-1]\r\n self._data[self._size-1] = 0\r\n self._size -= 1\r\n\r\n if self._size <= half_cap:\r\n if half_cap != 0:\r\n\r\n self.shrink()\r\n\r\n return item\r\n\r\n else:\r\n pass", "def pop(self):\n return heappop(self.priority_queue)[1]", "def pop(self):\n return self.ll.delete_first()", "def pop(self):\n (_,_,path) = heapq.heappop(self.frontierpq)\n return path", "def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")", "def peek(self):\n if self.is_empty():\n raise ValueError(\"Heap is empty\")\n return self._heap[0]", "def remove(self) -> T:\n if self.is_empty():\n raise EmptyStackError\n else:\n self._size -= 1\n return self._items.pop()", "def delete_first(self):\n if self.is_empty():\n raise Empty('list is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty(): # special case as deque is empty\n self._tail = None # removed head had been the tail\n else:\n self._head._prev = None\n return answer", "def pop(self):\n if not self._heap:\n log.debug(\"popped from an empty heap\")\n return\n\n popped_contact = heapq.heappop(self._heap)[1]\n del self._node_dict[popped_contact.getId()]\n\n return popped_contact", "def front(self):\n if self.size() < 1:\n return None\n else:\n # TODO: Return min item from heap, if any\n ...", "def delete(self, node):\n\n # logger_cagada.debug(\"norrando nodo %s\" % (type(node)))\n entry = self.entry_finder.pop(node)\n # logger_cagada.debug(\"la entry q c borra %s\" % entry)\n entry[-1] = self.REMOVED\n # logger_cagada.debug(\"el heap es %s\" % self.heap)\n return entry[0]", "def pop(self):\n return super().remove_item_from_front()", "def remove_min(self) -> Tuple[K, V]:\n while self.queue:\n # pylint: disable=unused-variable\n value, count, key = heapq.heappop(self.queue)\n if value is not REMOVED:\n del self.__key_map__[key]\n return (key, value)\n return None", "def pop(self):\n return self.remove(0)", "def extractmin(self):\n if len(self.heap) == 0: \n return None\n i = self.heap[0]\n last = self.heap[-1]\n del self.heap[-1]\n if len(self.heap) > 0:\n self.siftdown(last, 0)\n return i", "def test_remove(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n h.remove(2)\n\n self.assertTrue(Heap.is_heap(data), 'should preserve heap property')\n self.assertNotIn(8, h.data, 'the value corresponding to the index was removed')", "def dequeue(self):\n\n item = self.__items__.pop(0)\n return item", "def __heappop(heap, nodes, pos, stopPos = None):\n # Default stopping position to end of heap\n stopPos = stopPos if not None else len(heap) - 1\n \n # Swap target node with stopping position, re-order heap to stopping\n # position minus one, then pop the target node\n Graph.__swapHeapNodes(heap, nodes, pos, stopPos)\n Graph.__siftdown(heap, nodes, pos, stopPos - 1)\n node = heap.pop(stopPos)\n \n # Delete node from 
dictionary and return\n del nodes[node[1]]\n return node", "def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem", "def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r", "def remove_min(self) -> object:\n if self.is_empty() == True:\n raise MinHeapException\n\n # minimum value to be returned\n min_val = self.get_min()\n\n # get last index\n end = self.heap.length() - 1\n\n # root index\n root = 0\n\n # swap first and last nodes and remove last value\n self.heap.swap(root, end)\n self.heap.pop()\n\n # length\n length = self.heap.length()\n\n # left index and right index\n left_i = (2 * root) + 1\n right_i = (2 * root) + 2\n\n # if heap has only one value\n if left_i > length - 1:\n return min_val\n\n # if heap has only left child\n if right_i > length - 1:\n if self.heap.get_at_index(left_i) < self.heap.get_at_index(root):\n self.heap.swap(left_i, root)\n return min_val\n else:\n return min_val\n\n # percolate down heap\n while left_i < length and right_i < length:\n replace_val = self.heap.get_at_index(root)\n left_child = self.heap.get_at_index(left_i)\n right_child = self.heap.get_at_index(right_i)\n\n # find index to swap nodes and check that a node exists\n if self.find_replacement(left_i, right_i, left_child, right_child, replace_val):\n node = self.find_replacement(\n left_i, right_i, left_child, right_child, replace_val)\n\n # swap nodes, set new root and child indices\n self.heap.swap(root, node)\n root = node\n left_i = (node * 2) + 1\n right_i = (node * 2) + 2\n\n return min_val", "def pop(self):\n self._raise_if_empty()\n item = self._top.data\n self._top = self._top.next\n return item", "def peek_first(self):\n if len(self._heap) == 0:\n return None\n else:\n return self._heap[0]", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n item = self._data.delete(self._data.first())\n return (item._key, item._value)", "def extractMin(self):\n if not self.heap:\n raise IndexError(\"there is no root\")\n elif len(self.heap) < 2:\n return self.heap.pop()\n else:\n self.heap[0], oldMin = self.heap.pop(), self.heap[0]\n self._shiftDown()\n return oldMin" ]
[ "0.81402564", "0.80196005", "0.7973454", "0.78661436", "0.78423417", "0.77942383", "0.7791049", "0.7773328", "0.7764015", "0.7732128", "0.7705265", "0.76195896", "0.7605436", "0.75960815", "0.7542752", "0.75387627", "0.7532565", "0.7510987", "0.7498476", "0.74903107", "0.7478519", "0.7469709", "0.74671453", "0.7462812", "0.74346745", "0.7424168", "0.74186915", "0.74147207", "0.73945063", "0.7384677", "0.7374719", "0.73612833", "0.7354891", "0.7348313", "0.7326599", "0.73047155", "0.72994864", "0.7292316", "0.7271386", "0.7270128", "0.7258065", "0.72569364", "0.7236373", "0.7229446", "0.722323", "0.7193234", "0.7188068", "0.71635133", "0.71342677", "0.71189374", "0.71032906", "0.7098817", "0.7080763", "0.70764315", "0.70737034", "0.7060597", "0.7051694", "0.70496494", "0.7037669", "0.7035871", "0.7010598", "0.70044005", "0.7004024", "0.69912386", "0.69817036", "0.6964681", "0.6959402", "0.69418746", "0.69409794", "0.6937643", "0.69295096", "0.6927298", "0.69224507", "0.6916359", "0.68975127", "0.68941694", "0.6893663", "0.6869901", "0.6868054", "0.6857488", "0.6856118", "0.6850243", "0.68390596", "0.6831185", "0.6829578", "0.6817872", "0.6815644", "0.68071765", "0.68055356", "0.6794343", "0.6788232", "0.67827594", "0.67764825", "0.6764113", "0.6752536", "0.6751961", "0.6737342", "0.67268926", "0.6708792", "0.67002124" ]
0.68407655
82
Inserts the element elem with priority prio.
def insert(self, elem, prio):
    self.n += 1
    self.A.append( (elem, prio) )
    self.pos[elem] = self.n
    i = self.n
    p = i // 2
    self.insert_loop(i, p)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_priority(self, elem, prio):\n pos = self.pos[elem]\n currPrio = self.A[pos][1]\n self.A[pos] = (elem, prio)\n if self.cmpFn(prio, currPrio):\n self.insert_loop(pos, pos // 2) # Up heapify\n else:\n self.combine(pos) # Down heapify", "def enqueue(elem: Any, priority: int = 0) -> None:\n\tglobal queue\n\tqueue.append((priority, elem))\n\treturn None", "def insert(self, id, priority):\n self.n += 1\n i = self.n\n while i > 1:\n pIdx = int(i/2)\n p = self.elements[pIdx]\n\n if priority > p[PRIORITY]:\n break\n self.elements[i] = list(p)\n self.positions[p[ID]] = 1\n i = pIdx\n\n self.elements[i][ID] = id\n self.elements[i][PRIORITY] = priority\n self.positions[id] = i", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def insertElement(self, element , i ):\n\n self.heap[i] = element\n # Parent of ith position\n parenti = i // 2\n\n # Inserting element into the heap\n try:\n # Bubbling up\n if parenti != 0 and self.heap[i].dijkstraCriterion < self.heap[parenti].dijkstraCriterion:\n self.heap[i], self.heap[parenti] = self.heap[parenti], self.heap[i]\n self.insertElement(element, parenti)\n # Incrementing self.i position\n else:\n self.i += 1\n return\n\n except:\n # Bubbling up\n self.heap[i] = 'NaN'\n self.insertElement(element, parenti)\n return", "def append(self,data,priority):\r\n\t\tbisect.insort(self.queue,(priority,data))", "def insert(self, pri):\n heaps = self.priorities\n if pri > 10 or pri < 1:\n raise ValueError(\n 'Priority must be between 1 (high) - 10 (low)'\n )\n if pri not in heaps.keys():\n self._create_priorities(pri)\n\n priority = heaps.get(pri)\n priority.push(self._order)\n self._order += 1", "def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...", "def push(self, elem):\n pass", "def insert(self, value, priority=2):\n if not isinstance(priority, int):\n raise TypeError(\"Priority must be an integer\")\n if priority in self.priority_queue:\n self.priority_queue[priority].append(value)\n else:\n self.priority_queue[priority] = [value]\n print(self.priority_queue)", "def insert(self, node, priority=0):\n\n if node in self.entry_finder:\n self.delete(node)\n entry = [priority, node]\n self.entry_finder[node] = entry\n # logger_cagada.debug(\"el puto entry %s\" % entry)\n # logger_cagada.debug(\"l nodo q c agrega %s es %s\" % (type(node), node))\n self.heappush(self.heap, entry)\n # logger_cagada.debug(\"el finde aora es %s\" % self.entry_finder)\n # logger_cagada.debug(\"el heap aora es %s\" % self.heap)\n self.valida_caca()", "def insert(self, element):\n if self.size >= self.maxsize:\n return\n self.size += 1\n self.heap[self.size] = element\n\n current = self.size\n\n while self.heap[current] < self.heap[self.parent(current)]:\n self.swap(current, self.parent(current))\n current = self.parent(current)", "def insert(self, element):\n if self.size >= self.maxsize : \n return\n self.size+= 1\n self.Heap[self.size] = element \n \n current = self.size \n \n while self.Heap[current] < self.Heap[self.parent(current)]: \n self.swap(current, self.parent(current)) \n current = self.parent(current)", "def push(self, element, value):\n insert_pos = 0\n for index, el in enumerate(self.tops):\n if not self.find_min and el[1] >= value:\n insert_pos = index + 1\n elif self.find_min 
and el[1] <= value:\n insert_pos = index + 1\n self.tops.insert(insert_pos, [element, value])\n self.tops = self.tops[: self.n]", "def insert(self, p, elem):\n node = self._validate(p)\n new_node = self._Node(elem, idx=self._curr_idx, parent=node._parent)\n self._curr_idx += 1\n node._parent = new_node\n new_node._children.append(node)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return self._make_position(new_node)", "def insertChildBefore(new_elem, elem):\n parent = DOM.getParent(elem)\n id = DOM.getChildIndex(parent, elem)\n DOM.insertChild(parent, new_elem, id)", "def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))", "def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node", "def Insert(self, val, extra=None):\n if self._size >= 0:\n if val > self.best[0]:\n idx = bisect.bisect(self.best, val)\n # insert the new element\n if idx == self._size:\n self.best.append(val)\n self.extras.append(extra)\n else:\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)\n # and pop off the head\n self.best.pop(0)\n self.extras.pop(0)\n else:\n idx = bisect.bisect(self.best, val)\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)", "def insert(self, element: Node):\r\n if self._top == None:\r\n self._top = Node(None, element)\r\n return None\r\n new_element = self._add_element(element)\r\n self._correct_tree(new_element)", "def add(self, elem):", "def add(self, elem):", "def _heapify_after_add(self,ele):\r\n parent = self._parent(ele)\r\n if ele > 0 and self._data[ele] < self._data[parent]:\r\n self.swap(ele, parent)\r\n self._heapify_after_add(parent)", "def add(self, item, priority):\n heappush(self.contents, (priority, item))", "def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)", "def addNode(self, element):\n i = 0\n while i < len(self.nodes) and self.nodes[i].weight < element.weight:\n i += 1\n self.nodes.insert(i, element)", "def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)", "def test_insert_increases_size(sample_priorityq):\n assert len(sample_priorityq.heap_list) == 0\n sample_priorityq.insert([5, 1])\n assert len(sample_priorityq.heap_list) == 1\n sample_priorityq.insert([6, 2])\n assert len(sample_priorityq.heap_list) == 2", "def add_element(self, elem):\n self.add_element_with_id(elem, self.next_id)", "def insertElement(T,i):\r\n if not isFull(T):\r\n insertInternal(T,i)\r\n else:\r\n m, l, r = split(T)\r\n T.data = [m]\r\n T.child = [l,r]\r\n T.isLeaf = False\r\n k = findChildA(T,i) \r\n insertInternal(T.child[k],i)", "def insert(self, element):\n self.line.append(element)", "def add(self, element):\n\n if self.style == 'FIFO': # If FIFO, append element to end of list\n self.queue.append(element)\n\n elif self.style == 'LIFO': # If LIFO, append element to front of list\n self.queue.insert(0, element)", "def insert(self, idx, element):\n if self._length == self._capacity: # Need 
to increase size\n self._grow_arr()\n\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n idx = min(self._length, idx) # Any index over the length is converted\n\n # Move values after idx one right to make room for new element\n for i in range(self._length, idx, -1):\n self._arr[i] = self._arr[i - 1]\n self._arr[idx] = element # Insert element at new blank space\n self._length += 1", "def enqueue(self, element):\n raise NotImplementedError(\"enqueue: You should have implemented this method!\")", "def get_priority(self, elem):\n pos = self.pos[elem]\n return self.A[pos][1]", "def add(element):", "def put_elem(self, elem):\n serialized_elem = self.serialize_elem(elem)\n self.redis_client.lpush(self.buffer_name, serialized_elem)", "def insert_element(self, element):\n if self._size == 0:\n node = self._Node(element, 0, self)\n self._array[0] = node # Add node to root of empty heap\n self._size += 1\n return self.root()\n self._size += 1\n if self._size == self._N:\n self._resize_array(self._N * 2) # Double size of array\n node = self._Node(element, self._size-1, self)\n self._array[self._size-1] = node # Insert new node at end of heap\n self._upheap(node) # Up-heap it to proper location\n return node", "def add(self, task, priority=0):\r\n if task in self.entry_finder:\r\n self.remove(task)\r\n count = next(self.counter)\r\n entry = [priority, count, task]\r\n self.entry_finder[task] = entry\r\n heapq.heappush(self.pq, entry)", "def test_priority_que_success(priority_queue):\n priority_queue.insert(15)\n assert (priority_queue._heap[0].value,\n priority_queue._heap[0].priority) == (15, float('inf'))", "def prepend_element(self, element):\n\n pass", "def add(self, elem: T):\n if elem not in self._unique_values:\n if len(self._heap) < self.maxsize:\n heapq.heappush(self._heap, HeapObj(elem))\n elif elem < self._heap[0].val:\n heapq.heappushpop(self._heap, HeapObj(elem))\n self._unique_values.add(elem)", "def insert_new_element(self, element: LabelMetadata) -> None:\n\n if isinstance(element, dict):\n element = LabelMetadata.from_dict(element)\n if not isinstance(element, LabelMetadata):\n raise TypeError('element must be an LabelMetadata instance, got type {}'.format(type(element)))\n\n if self._elements is None:\n self._elements = [element, ]\n elif len(self._elements) == 0:\n self._elements.append(element)\n else:\n for i, entry in enumerate(self._elements):\n if element.timestamp > entry.timestamp:\n self._elements.insert(i, element)\n break", "def push(self, transition, priority):\n priority = priority * 10000\n priority = self._clip_p(priority)\n priority = int(priority)\n # if we reached the capacity, overwrite the oldest item\n if (self.size == self.capacity):\n self.queue[self.to_write%self.capacity] = transition\n self.sum_tree.update(self.to_write%self.capacity,priority)\n else:\n self.queue.append(transition)\n self.sum_tree.push(priority)\n self.to_write = self.to_write + 1", "def push(self, priority, key):\n index = len(self.__heap)\n self.__position[key] = index\n self.__heap.append([priority, key])\n self.__bubble_up(index)", "def push(self, obj):\n # wrap the object to allow for correct pop operation\n # remember that in python it's a min-heap (not max!)\n wrap_obj = (obj.minus_priority, len(self), obj)\n # use insertion number to ensure we never compare based on obj itself!\n # additionally resolves ties by popping earliest-inserted object\n heapq.heappush(self._heap, wrap_obj)", "def put(self, element):\n 
self.heap.append(element)\n # sift up the element append before\n self.sift_up(self.size() - 1)", "def test_priority_que_success_priority(priority_queue):\n priority_queue.insert(10)\n priority_queue.insert(5)\n priority_queue.insert(100, 1)\n priority_queue.insert(10, 1)\n assert priority_queue._heap[0].value == 100", "def enqueue(self, element):\n self.the_queue.append(element)", "def add(self, elem, prob):\n self.mask.append( Probability(elem, prob) )", "def enqueue(self, priority, value, key=None):\n key = key if key else value\n if key in self._index:\n self._update(priority, key)\n return\n self._heap.append(Item(priority, value, key))\n self._size = len(self._heap)\n self._index[key] = self._size - 1\n self._sift_up(self._size - 1)", "def add(self, element):\n # add element to the heap\n self.heap.append(element)\n\n # get index of added element and parent of added element\n index = len(self.heap) - 1\n parentIndex = (index - 1) // 2\n\n # swap parents and childs while needed\n while index >= 1 and self.heap[parentIndex][1] > self.heap[index][1]:\n\n # swap parent and child\n swap = self.heap[parentIndex]\n self.heap[parentIndex] = self.heap[index]\n self.heap[index] = swap\n\n # update parent and child indexes\n index = parentIndex\n parentIndex = (index - 1) // 2", "def insert(self, item):\n self._heap.append(item)\n self._perc_up(len(self._heap) - 1)", "def add(self, elem):\n self.data.append(elem)\n self._prune()", "def insere(self, index, elem):\n if index == 0:\n no = No(elem)\n no.prox = self.inicio\n self.inicio = no\n else:\n ponteiro = self._pega_no(index - 1)\n no = No(elem)\n no.prox = ponteiro.prox\n ponteiro.prox = no\n self._size = self._size + 1", "def add(self, element):\n # add element to the heap\n self.heap.append(element)\n\n # get index of added element and parent of added element\n index = len(self.heap) - 1\n parentIndex = (index - 1) // 2\n\n # swap parents and childs while needed\n while index >= 1 and self.heap[parentIndex] < self.heap[index]:\n\n # swap parent and child\n self.swap(parentIndex, index)\n\n # update parent and child indexes\n index = parentIndex\n parentIndex = (index - 1) // 2", "def DocumentElementInsertBefore(self):\n raise NotImplementedError()", "def insert(self, v): # pylint: disable=arguments-differ\n # The policy function can't be amplified", "def insert(self, k): \r\n self.heap_array.append(k)\r\n\r\n current_index = len(self.heap_array) - 1\r\n while (current_index > 0):\r\n parent_index = ((current_index-1)//2)\r\n\r\n if int(self.heap_array[current_index]) > int(self.heap_array[parent_index]): # if no vialation of the min heap property \r\n return\r\n else: # if heap property is broken then swap the parent and child that are breaking the prop \r\n self.heap_array[parent_index], self.heap_array[current_index] = self.heap_array[current_index], self.heap_array[parent_index]\r\n current_index = parent_index", "def add(self, elem):\n self.add_last(elem)", "def add(self, elem):\n assert self._is_int is False\n self._list.append(elem)", "def insert(self, k):\n #Append the element to the min heap\n self.heap_list.append(k)\n #Increase the size of the min heap\n self.current_size += 1\n #Move the value to its appropriate position in the min heap (following the definition of a min heap)\n self.sift_up(self.current_size)", "def add(self, element):\n pass", "def put(self, item, priority=None, *args, **kwargs):\n if priority is None:\n raise self.PQueueException('priority must be specified')\n super().put((priority, item), *args, **kwargs)", 
"def insert(pq):\n\ti = r.randint(0, bound-1)\n\tpq.put(i)\n\tlogging.info(\"insert %s\", i)", "def setPriority(self, p):\n self.priority = p", "def insert_elements_to_heap(heap, elements):\n for element in elements:\n heap.insert(element)", "def append(self, event, priority):\n self._queue.append((priority, next(self._counter), event))", "def insert(self, i, x) -> None:\n pass", "def enqueue(self, index, trace, priority):\n self.queues[index].put(index, trace, priority)", "def push(self, element):\n self.the_stack.append(element)", "def increase_priority(self):\n if self._priority > 0:\n self._priority -= 1", "def append_element(self, element):\n\n pass", "def push(self, val):\n self.insert(val)", "def _add_element(self, element) -> Node:\r\n current_element = self._top\r\n while True:\r\n if current_element.value() <= element:\r\n if current_element.right_son() == None:\r\n new_son = Node(current_element, element)\r\n current_element.set_right_son(new_son)\r\n current_element = current_element.right_son()\r\n break\r\n else:\r\n current_element = current_element.right_son()\r\n continue\r\n elif current_element.value() > element:\r\n if current_element.left_son() == None:\r\n new_son = Node(current_element, element)\r\n current_element.set_left_son(new_son)\r\n current_element = current_element.left_son()\r\n break\r\n else:\r\n current_element = current_element.left_son()\r\n continue\r\n return current_element", "def insert(self, item):\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))", "def addTask(self, task, priority=0):\n self.queue.put((priority, task))", "def insert(self, value: T) -> None:\n if self._array == []:\n self._array.append(value)\n else:\n parent_idx = (len(self._array) - 1) // 2\n curr_idx = len(self._array)\n self._array.append(value)\n \n # While the value to be inserted is less than it's parent,\n # keep swapping the parent and child from the bottom up until\n # the min heap properties hold or, until swapped with the root node.\n while value < self._array[parent_idx] and parent_idx >= 0:\n temp_value = self._array[parent_idx]\n self._array[parent_idx] = value\n self._array[curr_idx] = temp_value\n curr_idx = parent_idx\n parent_idx = (parent_idx - 1) // 2", "def push(self, item):\n\t\tself.top+=1;\n\t\tself.arr.insert(self.top, item);", "def insert(self, e):\n try:\n self.vals[e] += 1\n except:\n self.vals[e] = 1", "def _update_priority(self, task, prio, worker):\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._update_priority(t, prio, worker)", "def addOrReplace(priorityQ, vertex, distance, cellVisited, lastVertex):\n index = getOriginDistance(priorityQ, vertex)\n if index == None:\n if vertex not in cellVisited:\n heapq.heappush(priorityQ, VertexDistance(vertex, lastVertex, distance))\n else:\n if distance < priorityQ[index].distance():\n # print(\"To be delated\", priorityQ[index])\n del priorityQ[index]\n # print(\"After delate\", priorityQ[index])\n heapq.heappush(priorityQ, VertexDistance(vertex, lastVertex, distance))", "def insert(self, val):\n self.data.insert(0,val)\n self.size = self.size + 1", "def _priority_order(id_priority_list, elem):\n assert 
isinstance(id_priority_list, list)\n # match id types with id priority\n for index, id_elem in enumerate(id_priority_list):\n if elem == id_elem:\n return index\n # the id type is not in id_priority_list so it will be placed last\n return len(id_priority_list) + 1", "def insert(self, item):\n self.heaplist.append(item)\n self.currentsize += 1\n self.shift_item_up(self.currentsize)", "def insert(self, arr, index): # special insertion\n while self.head:\n if arr[index] > arr[self.tail.data]:\n self.del_frm_tail()\n else:\n break\n\n node = Node(index)\n\n if self.head is None:\n self.head = node\n self.tail = node\n else:\n self.tail.next = node\n node.prev = self.tail\n self.tail = node", "def push(self, new_element):\n self.ll.insert_first(new_element)", "def add(self, obs_t, action, reward, obs_tp1, done):\n data = (obs_t, action, reward, obs_tp1, done)\n priority = random.uniform(0, 1)\n if len(self._storage) < self._maxsize:\n heapq.heappush(self._storage, (priority, data))\n elif priority > self._storage[0][0]:\n heapq.heapreplace(self._storage, (priority, data))", "def vsAddElement(self, elem):\n idx = len(self._vs_fields)\n self.vsAddField(\"%d\" % idx, elem)", "def add_priority(self, entity_type, obj_list, comp_name=None, priority=3):\n i = priority\n objects = \", \".join(obj_list)\n args = [\"NAME:UpdatePriorityListData\"]\n if entity_type == 1:\n prio = [\n \"NAME:PriorityListParameters\",\n \"EntityType:=\",\n \"Object\",\n \"EntityList:=\",\n objects,\n \"PriorityNumber:=\",\n i,\n \"PriorityListType:=\",\n \"3D\",\n ]\n self._priorities_args.append(prio)\n args += self._priorities_args\n elif entity_type == 2:\n pcblist = self.modeler.oeditor.Get3DComponentInstanceNames(comp_name)\n prio = [\n \"NAME:PriorityListParameters\",\n \"EntityType:=\",\n \"Component\",\n \"EntityList:=\",\n pcblist[0],\n \"PriorityNumber:=\",\n i,\n \"PriorityListType:=\",\n \"3D\",\n ]\n self._priorities_args.append(prio)\n args += self._priorities_args\n self.modeler.oeditor.UpdatePriorityList([\"NAME:UpdatePriorityListData\"])\n self.modeler.oeditor.UpdatePriorityList(args)\n return True", "def push(self, node):\n self.prepend(node)", "def insert_element_before_similar(self, parent, new_child):\n new_tag = self.tag_base_name(new_child.tag)\n for i, child in enumerate(parent.getchildren()):\n if not self.tag_base_name_is(child, new_tag):\n parent.insert(i, new_child)\n break\n else:\n parent.append(new_child)", "def insert(self, to_insert: Article) -> None:\n heapq.heappush(self.heap, to_insert)", "def test_insert(self):\n self.minheap.heap = [0, 1, 4, 6, 9]\n self.minheap.insert(2)\n assert self.minheap.heap == [0, 1, 2, 6, 9, 4]", "def test_priority_que_success_min_no_priority(priority_queue):\n priority_queue.insert(10)\n priority_queue.insert(5)\n priority_queue.insert(100)\n assert priority_queue._heap[0].value == 10", "def update(self, idx: int, new_priority: T.Union[int, float]):\n old_priority, item = self.__heap[idx]\n self.__heap[idx] = (new_priority, item)\n\n if new_priority < old_priority:\n self.__sift_up(idx)\n else:\n self.__sift_down(idx)", "def insert(self, new_element, position):\n current = self.head\n count = 1\n if position > 1:\n while ((current)and (count < position)):\n if(count == position-1):\n\n new_element.next=current.next\n current.next = new_element\n break\n #print(\"count\",count)\n current = current.next\n count = count + 1\n elif position == 1:\n new_element.next = self.head\n self.head = new_element\n\n pass", "def _insert_op(self, op):", "def store(self, 
experience, priority):\n self._experience.append(experience)\n insert_index = self.fix_index(priority)\n if insert_index >= 0:\n self.exp_idx.insert(insert_index, len(self._experience) - 1)\n if(self.record_size > self.size):\n #self._experience.pop(0)\n sys.stderr.write(\"Experience overflow!\")\n return True\n\n elif insert_index == -10:\n sys.stderr.write('Insert failed\\n')\n return False", "def InsertElement(self, position, element):\n self.__context.builder.DocumentElementInsert(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n position, element)" ]
[ "0.74379945", "0.73325443", "0.71752936", "0.69993013", "0.65939367", "0.65610933", "0.65355706", "0.6510952", "0.6450642", "0.6369028", "0.63523465", "0.62270975", "0.622298", "0.6217232", "0.6212362", "0.6154356", "0.6117256", "0.61147964", "0.6046682", "0.5996028", "0.59924835", "0.59924835", "0.59768164", "0.59750265", "0.5967423", "0.5926508", "0.5858709", "0.5819772", "0.5814046", "0.57991993", "0.579724", "0.5774576", "0.5753332", "0.57489526", "0.5736074", "0.572934", "0.5727909", "0.5697309", "0.56819147", "0.5665308", "0.5663082", "0.56612384", "0.5659427", "0.5656351", "0.56044847", "0.559476", "0.5575776", "0.55506486", "0.55502325", "0.5549019", "0.55456567", "0.55338216", "0.55266863", "0.5516381", "0.54982185", "0.54952174", "0.54919857", "0.5489563", "0.54721886", "0.54703987", "0.54631954", "0.54551303", "0.5448944", "0.5437324", "0.54309124", "0.5421151", "0.5420622", "0.540423", "0.53868353", "0.5386258", "0.53653693", "0.5358483", "0.5354619", "0.5340443", "0.5332881", "0.5332405", "0.53274524", "0.52859867", "0.52833676", "0.5274859", "0.526342", "0.52593356", "0.5256076", "0.5256063", "0.52544796", "0.52520716", "0.524901", "0.5237828", "0.5220615", "0.52202624", "0.521274", "0.52108705", "0.5209363", "0.52086604", "0.5207853", "0.5204353", "0.5202099", "0.5199711", "0.518521", "0.5170899" ]
0.83393705
0