Dataset schema (per-row fields and value ranges):
  repo             string, length 3 to 91
  file             string, length 16 to 152
  code             string, length 0 to 3.77M
  file_length      int64, 0 to 3.77M
  avg_line_length  float64, 0 to 16k
  max_line_length  int64, 0 to 273k
  extension_type   string, 1 class
repo: OpenPSG
file: OpenPSG-main/openpsg/utils/vis_tools/detectron_viz.py
import colorsys
import math

import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import numpy as np
import pycocotools.mask as mask_util
import torch
from detectron2.data.catalog import MetadataCatalog
from detectron2.structures import (BitMasks, Boxes, BoxMode, Keypoints,
                                   PolygonMasks, RotatedBoxes)
from detectron2.utils.colormap import random_color
from detectron2.utils.file_io import PathManager
from detectron2.utils.visualizer import (_KEYPOINT_THRESHOLD,
                                         _LARGE_MASK_AREA_THRESH, _OFF_WHITE,
                                         _RED, _SMALL_OBJECT_AREA_THRESH,
                                         ColorMode, GenericMask, VisImage,
                                         _create_text_labels,
                                         _PanopticPrediction)
from PIL import Image


class Visualizer:
    """Visualizer that draws data about detection/segmentation on images.

    It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
    that draw primitive objects to images, as well as high-level wrappers like
    `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
    that draw composite data in some pre-defined style.

    Note that the exact visualization style for the high-level wrappers is
    subject to change. Style such as color, opacity, label contents,
    visibility of labels, or even the visibility of objects themselves (e.g.
    when the object is too small) may change according to different
    heuristics, as long as the results still look visually reasonable.

    To obtain a consistent style, you can implement custom drawing functions
    with the abovementioned primitive methods instead. If you need more
    customized visualization styles, you can process the data yourself
    following their format documented in tutorials (:doc:`/tutorials/models`,
    :doc:`/tutorials/datasets`). This class does not intend to satisfy
    everyone's preference on drawing styles.

    This visualizer focuses on high rendering quality rather than
    performance. It is not designed to be used for real-time applications.
    """

    # TODO implement a fast, rasterized version using OpenCV

    def __init__(self, img_rgb, metadata=None, scale=1.0,
                 instance_mode=ColorMode.IMAGE):
        """
        Args:
            img_rgb: a numpy array of shape (H, W, C), where H and W
                correspond to the height and width of the image respectively.
                C is the number of color channels. The image is required to
                be in RGB format since that is a requirement of the
                Matplotlib library. The image is also expected to be in the
                range [0, 255].
            metadata (Metadata): dataset metadata (e.g. class names and
                colors)
            instance_mode (ColorMode): defines one of the pre-defined styles
                for drawing instances on an image.
        """
        self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
        if metadata is None:
            metadata = MetadataCatalog.get('__nonexist__')
        self.metadata = metadata
        self.output = VisImage(self.img, scale=scale)
        self.cpu_device = torch.device('cpu')

        # too small texts are useless, therefore clamp the font size to a
        # minimum
        self._default_font_size = max(
            np.sqrt(self.output.height * self.output.width) // 90,
            10 // scale)
        self._instance_mode = instance_mode
        self.keypoint_threshold = _KEYPOINT_THRESHOLD

    def draw_instance_predictions(self, predictions):
        """Draw instance-level prediction results on an image.

        Args:
            predictions (Instances): the output of an instance
                detection/segmentation model. Following fields will be used
                to draw: "pred_boxes", "pred_classes", "scores",
                "pred_masks" (or "pred_masks_rle").

        Returns:
            output (VisImage): image object with visualizations.
        """
        boxes = predictions.pred_boxes if predictions.has(
            'pred_boxes') else None
        scores = predictions.scores if predictions.has('scores') else None
        classes = predictions.pred_classes.tolist() if predictions.has(
            'pred_classes') else None
        labels = _create_text_labels(classes, scores,
                                     self.metadata.get('thing_classes', None))
        keypoints = predictions.pred_keypoints if predictions.has(
            'pred_keypoints') else None

        if predictions.has('pred_masks'):
            masks = np.asarray(predictions.pred_masks)
            masks = [
                GenericMask(x, self.output.height, self.output.width)
                for x in masks
            ]
        else:
            masks = None

        if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(
                'thing_colors'):
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
                for c in classes
            ]
            alpha = 0.8
        else:
            colors = None
            alpha = 0.5

        if self._instance_mode == ColorMode.IMAGE_BW:
            self.output.reset_image(
                self._create_grayscale_image(
                    (predictions.pred_masks.any(dim=0) > 0).numpy()
                    if predictions.has('pred_masks') else None))
            alpha = 0.3

        self.overlay_instances(
            masks=masks,
            boxes=boxes,
            labels=labels,
            keypoints=keypoints,
            assigned_colors=colors,
            alpha=alpha,
        )
        return self.output

    def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
        """Draw semantic segmentation predictions/labels.

        Args:
            sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
                Each value is the integer label of the pixel.
            area_threshold (int): segments with area less than
                `area_threshold` are not drawn.
            alpha (float): the larger it is, the more opaque the
                segmentations are.

        Returns:
            output (VisImage): image object with visualizations.
        """
        if isinstance(sem_seg, torch.Tensor):
            sem_seg = sem_seg.numpy()
        labels, areas = np.unique(sem_seg, return_counts=True)
        sorted_idxs = np.argsort(-areas).tolist()
        labels = labels[sorted_idxs]
        for label in filter(lambda l: l < len(self.metadata.stuff_classes),
                            labels):
            try:
                mask_color = [
                    x / 255 for x in self.metadata.stuff_colors[label]
                ]
            except (AttributeError, IndexError):
                mask_color = None

            binary_mask = (sem_seg == label).astype(np.uint8)
            text = self.metadata.stuff_classes[label]
            self.draw_binary_mask(
                binary_mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )
        return self.output

    def draw_panoptic_seg(self, panoptic_seg, segments_info,
                          area_threshold=None, alpha=0.7):
        """Draw panoptic prediction annotations or results.

        Args:
            panoptic_seg (Tensor): of shape (height, width) where the values
                are ids for each segment.
            segments_info (list[dict] or None): Describes each segment in
                `panoptic_seg`. If it is a ``list[dict]``, each dict contains
                keys "id", "category_id". If None, category id of each pixel
                is computed by ``pixel // metadata.label_divisor``.
            area_threshold (int): stuff segments with area less than
                `area_threshold` are not drawn.

        Returns:
            output (VisImage): image object with visualizations.
        """
        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)

        if self._instance_mode == ColorMode.IMAGE_BW:
            self.output.reset_image(
                self._create_grayscale_image(pred.non_empty_mask()))

        # draw mask for all semantic segments first i.e. "stuff"
        for mask, sinfo in pred.semantic_masks():
            category_idx = sinfo['category_id']
            try:
                mask_color = [
                    x / 255 for x in self.metadata.stuff_colors[category_idx]
                ]
            except AttributeError:
                mask_color = None

            text = self.metadata.stuff_classes[category_idx]
            self.draw_binary_mask(
                mask,
                color=mask_color,
                edge_color=_OFF_WHITE,
                text=text,
                alpha=alpha,
                area_threshold=area_threshold,
            )

        # draw mask for all instances second
        all_instances = list(pred.instance_masks())
        if len(all_instances) == 0:
            return self.output
        masks, sinfo = list(zip(*all_instances))
        category_ids = [x['category_id'] for x in sinfo]

        try:
            scores = [x['score'] for x in sinfo]
        except KeyError:
            scores = None
        labels = _create_text_labels(category_ids, scores,
                                     self.metadata.thing_classes,
                                     [x.get('iscrowd', 0) for x in sinfo])

        try:
            colors = [
                self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
                for c in category_ids
            ]
        except AttributeError:
            colors = None
        self.overlay_instances(masks=masks,
                               labels=labels,
                               assigned_colors=colors,
                               alpha=alpha)

        return self.output

    draw_panoptic_seg_predictions = draw_panoptic_seg  # backward compatibility

    def draw_dataset_dict(self, dic):
        """Draw annotations/segmentations in Detectron2 Dataset format.

        Args:
            dic (dict): annotation/segmentation data of one image, in
                Detectron2 Dataset format.

        Returns:
            output (VisImage): image object with visualizations.
        """
        annos = dic.get('annotations', None)
        if annos:
            if 'segmentation' in annos[0]:
                masks = [x['segmentation'] for x in annos]
            else:
                masks = None
            if 'keypoints' in annos[0]:
                keypts = [x['keypoints'] for x in annos]
                keypts = np.array(keypts).reshape(len(annos), -1, 3)
            else:
                keypts = None

            boxes = [
                BoxMode.convert(x['bbox'], x['bbox_mode'], BoxMode.XYXY_ABS)
                if len(x['bbox']) == 4 else x['bbox'] for x in annos
            ]

            colors = None
            category_ids = [x['category_id'] for x in annos]
            if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(
                    'thing_colors'):
                colors = [
                    self._jitter(
                        [x / 255 for x in self.metadata.thing_colors[c]])
                    for c in category_ids
                ]
            names = self.metadata.get('thing_classes', None)
            labels = _create_text_labels(
                category_ids,
                scores=None,
                class_names=names,
                is_crowd=[x.get('iscrowd', 0) for x in annos],
            )
            self.overlay_instances(labels=labels,
                                   boxes=boxes,
                                   masks=masks,
                                   keypoints=keypts,
                                   assigned_colors=colors)

        sem_seg = dic.get('sem_seg', None)
        if sem_seg is None and 'sem_seg_file_name' in dic:
            with PathManager.open(dic['sem_seg_file_name'], 'rb') as f:
                sem_seg = Image.open(f)
                sem_seg = np.asarray(sem_seg, dtype='uint8')
        if sem_seg is not None:
            self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)

        pan_seg = dic.get('pan_seg', None)
        if pan_seg is None and 'pan_seg_file_name' in dic:
            with PathManager.open(dic['pan_seg_file_name'], 'rb') as f:
                pan_seg = Image.open(f)
                pan_seg = np.asarray(pan_seg)
                from panopticapi.utils import rgb2id
                pan_seg = rgb2id(pan_seg)
        if pan_seg is not None:
            segments_info = dic['segments_info']
            pan_seg = torch.tensor(pan_seg)
            self.draw_panoptic_seg(pan_seg, segments_info,
                                   area_threshold=0, alpha=0.5)
        return self.output

    def overlay_instances(
        self,
        *,
        boxes=None,
        labels=None,
        masks=None,
        keypoints=None,
        assigned_colors=None,
        alpha=0.5,
    ):
        """
        Args:
            boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
                or an Nx4 numpy array of XYXY_ABS format for the N objects in
                a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy
                array of (x_center, y_center, width, height, angle_degrees)
                format for the N objects in a single image.
            labels (list[str]): the text to be displayed for each instance.
            masks (masks-like object): Supported types are:

                * :class:`detectron2.structures.PolygonMasks`,
                  :class:`detectron2.structures.BitMasks`.
                * list[list[ndarray]]: contains the segmentation masks for
                  all objects in one image. The first level of the list
                  corresponds to individual instances, the second level to
                  all the polygons that compose the instance, and the third
                  level to the polygon coordinates. The third level should
                  have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
                * list[ndarray]: each ndarray is a binary mask of shape
                  (H, W).
                * list[dict]: each dict is a COCO-style RLE.
            keypoints (Keypoint or array like): an array-like object of
                shape (N, K, 3), where N is the number of instances and K is
                the number of keypoints. The last dimension corresponds to
                (x, y, visibility or score).
            assigned_colors (list[matplotlib.colors]): a list of colors,
                where each color corresponds to each mask or box in the
                image. Refer to 'matplotlib.colors' for the full list of
                formats that the colors are accepted in.

        Returns:
            output (VisImage): image object with visualizations.
        """
        num_instances = 0
        if boxes is not None:
            boxes = self._convert_boxes(boxes)
            num_instances = len(boxes)
        if masks is not None:
            masks = self._convert_masks(masks)
            if num_instances:
                assert len(masks) == num_instances
            else:
                num_instances = len(masks)
        if keypoints is not None:
            if num_instances:
                assert len(keypoints) == num_instances
            else:
                num_instances = len(keypoints)
            keypoints = self._convert_keypoints(keypoints)
        if labels is not None:
            assert len(labels) == num_instances
        if assigned_colors is None:
            assigned_colors = [
                random_color(rgb=True, maximum=1)
                for _ in range(num_instances)
            ]
        if num_instances == 0:
            return self.output
        if boxes is not None and boxes.shape[1] == 5:
            return self.overlay_rotated_instances(
                boxes=boxes, labels=labels, assigned_colors=assigned_colors)

        # Display in largest to smallest order to reduce occlusion.
        areas = None
        if boxes is not None:
            areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
        elif masks is not None:
            areas = np.asarray([x.area() for x in masks])

        if areas is not None:
            sorted_idxs = np.argsort(-areas).tolist()
            # Re-order overlapped instances in descending order.
            boxes = boxes[sorted_idxs] if boxes is not None else None
            labels = [labels[k] for k in sorted_idxs
                      ] if labels is not None else None
            masks = [masks[idx] for idx in sorted_idxs
                     ] if masks is not None else None
            assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
            keypoints = keypoints[
                sorted_idxs] if keypoints is not None else None

        for i in range(num_instances):
            color = assigned_colors[i]
            if boxes is not None:
                self.draw_box(boxes[i], edge_color=color)

            if masks is not None:
                for segment in masks[i].polygons:
                    self.draw_polygon(segment.reshape(-1, 2), color,
                                      alpha=alpha)

            if labels is not None:
                # first get a box
                if boxes is not None:
                    x0, y0, x1, y1 = boxes[i]
                    # if drawing boxes, put text on the box corner.
                    text_pos = (x0, y0)
                    horiz_align = 'left'
                elif masks is not None:
                    # skip small mask without polygon
                    if len(masks[i].polygons) == 0:
                        continue
                    x0, y0, x1, y1 = masks[i].bbox()

                    # draw text in the center (defined by median) when box is
                    # not drawn; median is less sensitive to outliers.
                    text_pos = np.median(masks[i].mask.nonzero(),
                                         axis=1)[::-1]
                    horiz_align = 'center'
                else:
                    # drawing the box confidence for keypoints isn't very
                    # useful.
                    continue

                # for small objects, draw text at the side to avoid occlusion
                instance_area = (y1 - y0) * (x1 - x0)
                if (instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
                        or y1 - y0 < 40 * self.output.scale):
                    if y1 >= self.output.height - 5:
                        text_pos = (x1, y0)
                    else:
                        text_pos = (x0, y1)

                height_ratio = (y1 - y0) / np.sqrt(
                    self.output.height * self.output.width)
                lighter_color = self._change_color_brightness(
                    color, brightness_factor=0.7)
                font_size = (np.clip(
                    (height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 *
                    self._default_font_size)
                self.draw_text(
                    labels[i],
                    text_pos,
                    color=lighter_color,
                    horizontal_alignment=horiz_align,
                    font_size=font_size,
                )

        # draw keypoints
        if keypoints is not None:
            for keypoints_per_instance in keypoints:
                self.draw_and_connect_keypoints(keypoints_per_instance)

        return self.output

    def overlay_rotated_instances(self, boxes=None, labels=None,
                                  assigned_colors=None):
        """
        Args:
            boxes (ndarray): an Nx5 numpy array of (x_center, y_center,
                width, height, angle_degrees) format for the N objects in a
                single image.
            labels (list[str]): the text to be displayed for each instance.
            assigned_colors (list[matplotlib.colors]): a list of colors,
                where each color corresponds to each mask or box in the
                image. Refer to 'matplotlib.colors' for the full list of
                formats that the colors are accepted in.

        Returns:
            output (VisImage): image object with visualizations.
        """
        num_instances = len(boxes)

        if assigned_colors is None:
            assigned_colors = [
                random_color(rgb=True, maximum=1)
                for _ in range(num_instances)
            ]
        if num_instances == 0:
            return self.output

        # Display in largest to smallest order to reduce occlusion.
        if boxes is not None:
            areas = boxes[:, 2] * boxes[:, 3]

        sorted_idxs = np.argsort(-areas).tolist()
        # Re-order overlapped instances in descending order.
        boxes = boxes[sorted_idxs]
        labels = [labels[k] for k in sorted_idxs
                  ] if labels is not None else None
        colors = [assigned_colors[idx] for idx in sorted_idxs]

        for i in range(num_instances):
            self.draw_rotated_box_with_label(
                boxes[i],
                edge_color=colors[i],
                label=labels[i] if labels is not None else None)

        return self.output

    def draw_and_connect_keypoints(self, keypoints):
        """Draws keypoints of an instance and follows the rules for keypoint
        connections to draw lines between appropriate keypoints.

        This follows color heuristics for line color.

        Args:
            keypoints (Tensor): a tensor of shape (K, 3), where K is the
                number of keypoints and the last dimension corresponds to
                (x, y, probability).

        Returns:
            output (VisImage): image object with visualizations.
        """
        visible = {}
        keypoint_names = self.metadata.get('keypoint_names')
        for idx, keypoint in enumerate(keypoints):
            # draw keypoint
            x, y, prob = keypoint
            if prob > self.keypoint_threshold:
                self.draw_circle((x, y), color=_RED)
                if keypoint_names:
                    keypoint_name = keypoint_names[idx]
                    visible[keypoint_name] = (x, y)

        if self.metadata.get('keypoint_connection_rules'):
            for kp0, kp1, color in self.metadata.keypoint_connection_rules:
                if kp0 in visible and kp1 in visible:
                    x0, y0 = visible[kp0]
                    x1, y1 = visible[kp1]
                    color = tuple(x / 255.0 for x in color)
                    self.draw_line([x0, x1], [y0, y1], color=color)

        # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
        # Note that this strategy is specific to person keypoints.
        # For other keypoints, it should just do nothing
        try:
            ls_x, ls_y = visible['left_shoulder']
            rs_x, rs_y = visible['right_shoulder']
            mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y +
                                                                 rs_y) / 2
        except KeyError:
            pass
        else:
            # draw line from nose to mid-shoulder
            nose_x, nose_y = visible.get('nose', (None, None))
            if nose_x is not None:
                self.draw_line([nose_x, mid_shoulder_x],
                               [nose_y, mid_shoulder_y],
                               color=_RED)

            try:
                # draw line from mid-shoulder to mid-hip
                lh_x, lh_y = visible['left_hip']
                rh_x, rh_y = visible['right_hip']
            except KeyError:
                pass
            else:
                mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
                self.draw_line([mid_hip_x, mid_shoulder_x],
                               [mid_hip_y, mid_shoulder_y],
                               color=_RED)
        return self.output

    """
    Primitive drawing functions:
    """

    def draw_text(
        self,
        text,
        position,
        *,
        font_size=None,
        color='g',
        horizontal_alignment='center',
        rotation=0,
    ):
        """
        Args:
            text (str): class label
            position (tuple): a tuple of the x and y coordinates to place
                text on image.
            font_size (int, optional): font size of the text. If not
                provided, a font size proportional to the image width is
                calculated and used.
            color: color of the text. Refer to `matplotlib.colors` for the
                full list of formats that are accepted.
            horizontal_alignment (str): see `matplotlib.text.Text`
            rotation: rotation angle in degrees CCW

        Returns:
            output (VisImage): image object with text drawn.
        """
        if not font_size:
            font_size = self._default_font_size

        # since the text background is dark, we don't want the text to be dark
        color = np.maximum(list(mplc.to_rgb(color)), 0.2)
        color[np.argmax(color)] = max(0.8, np.max(color))

        x, y = position
        self.output.ax.text(
            x,
            y,
            text,
            size=font_size * self.output.scale,
            # FIXME
            # family="sans-serif",
            bbox={
                'facecolor': 'black',
                'alpha': 0.8,
                'pad': 0.7,
                'edgecolor': 'none'
            },
            verticalalignment='top',
            horizontalalignment=horizontal_alignment,
            color=color,
            zorder=10,
            rotation=rotation,
        )
        return self.output

    def draw_box(self, box_coord, alpha=0.5, edge_color='g', line_style='-'):
        """
        Args:
            box_coord (tuple): a tuple containing x0, y0, x1, y1
                coordinates, where x0 and y0 are the coordinates of the
                box's top left corner, and x1 and y1 are the coordinates of
                the box's bottom right corner.
            alpha (float): blending coefficient. Smaller values lead to more
                transparent masks.
            edge_color: color of the outline of the box. Refer to
                `matplotlib.colors` for the full list of formats that are
                accepted.
            line_style (string): the string to use to create the outline of
                the boxes.

        Returns:
            output (VisImage): image object with box drawn.
        """
        x0, y0, x1, y1 = box_coord
        width = x1 - x0
        height = y1 - y0

        linewidth = max(self._default_font_size / 4, 1)

        self.output.ax.add_patch(
            mpl.patches.Rectangle(
                (x0, y0),
                width,
                height,
                fill=False,
                edgecolor=edge_color,
                linewidth=linewidth * self.output.scale,
                alpha=alpha,
                linestyle=line_style,
            ))
        return self.output

    def draw_rotated_box_with_label(self, rotated_box, alpha=0.5,
                                    edge_color='g', line_style='-',
                                    label=None):
        """Draw a rotated box with label on its top-left corner.

        Args:
            rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h,
                angle), where cnt_x and cnt_y are the center coordinates of
                the box. w and h are the width and height of the box. angle
                represents how many degrees the box is rotated CCW with
                regard to the 0-degree box.
            alpha (float): blending coefficient. Smaller values lead to more
                transparent masks.
            edge_color: color of the outline of the box. Refer to
                `matplotlib.colors` for the full list of formats that are
                accepted.
            line_style (string): the string to use to create the outline of
                the boxes.
            label (string): label for rotated box. It will not be rendered
                when set to None.

        Returns:
            output (VisImage): image object with box drawn.
        """
        cnt_x, cnt_y, w, h, angle = rotated_box
        area = w * h
        # use thinner lines when the box is small
        linewidth = self._default_font_size / (
            6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3)

        theta = angle * math.pi / 180.0
        c = math.cos(theta)
        s = math.sin(theta)
        rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2),
                (w / 2, h / 2)]
        # x: left->right ; y: top->down
        rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y)
                        for (xx, yy) in rect]
        for k in range(4):
            j = (k + 1) % 4
            self.draw_line(
                [rotated_rect[k][0], rotated_rect[j][0]],
                [rotated_rect[k][1], rotated_rect[j][1]],
                color=edge_color,
                linestyle='--' if k == 1 else line_style,
                linewidth=linewidth,
            )

        if label is not None:
            text_pos = rotated_rect[1]  # topleft corner
            height_ratio = h / np.sqrt(self.output.height *
                                       self.output.width)
            label_color = self._change_color_brightness(
                edge_color, brightness_factor=0.7)
            font_size = (np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) *
                         0.5 * self._default_font_size)
            self.draw_text(label,
                           text_pos,
                           color=label_color,
                           font_size=font_size,
                           rotation=angle)

        return self.output

    def draw_circle(self, circle_coord, color, radius=3):
        """
        Args:
            circle_coord (list(int) or tuple(int)): contains the x and y
                coordinates of the center of the circle.
            color: color of the circle. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            radius (int): radius of the circle.

        Returns:
            output (VisImage): image object with circle drawn.
        """
        x, y = circle_coord
        self.output.ax.add_patch(
            mpl.patches.Circle(circle_coord,
                               radius=radius,
                               fill=True,
                               color=color))
        return self.output

    def draw_line(self, x_data, y_data, color, linestyle='-',
                  linewidth=None):
        """
        Args:
            x_data (list[int]): a list containing x values of all the points
                being drawn. Length of list should match the length of
                y_data.
            y_data (list[int]): a list containing y values of all the points
                being drawn. Length of list should match the length of
                x_data.
            color: color of the line. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
                for a full list of formats that are accepted.
            linewidth (float or None): width of the line. When it's None, a
                default value will be computed and used.

        Returns:
            output (VisImage): image object with line drawn.
        """
        if linewidth is None:
            linewidth = self._default_font_size / 3
        linewidth = max(linewidth, 1)
        self.output.ax.add_line(
            mpl.lines.Line2D(
                x_data,
                y_data,
                linewidth=linewidth * self.output.scale,
                color=color,
                linestyle=linestyle,
            ))
        return self.output

    def draw_binary_mask(self, binary_mask, color=None, *, edge_color=None,
                         text=None, alpha=0.5, area_threshold=0):
        """
        Args:
            binary_mask (ndarray): numpy array of shape (H, W), where H is
                the image height and W is the image width. Each value in the
                array is either a 0 or 1 value of uint8 type.
            color: color of the mask. Refer to `matplotlib.colors` for a
                full list of formats that are accepted. If None, will pick a
                random color.
            edge_color: color of the polygon edges. Refer to
                `matplotlib.colors` for a full list of formats that are
                accepted.
            text (str): if not None, will be drawn at the object's center of
                mass.
            alpha (float): blending coefficient. Smaller values lead to more
                transparent masks.
            area_threshold (float): a connected component smaller than this
                area will not be shown.

        Returns:
            output (VisImage): image object with mask drawn.
        """
        if color is None:
            color = random_color(rgb=True, maximum=1)
        color = mplc.to_rgb(color)

        has_valid_segment = False
        binary_mask = binary_mask.astype('uint8')  # opencv needs uint8
        mask = GenericMask(binary_mask, self.output.height,
                           self.output.width)
        shape2d = (binary_mask.shape[0], binary_mask.shape[1])

        if not mask.has_holes:
            # draw polygons for regular masks
            for segment in mask.polygons:
                area = mask_util.area(
                    mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
                if area < (area_threshold or 0):
                    continue
                has_valid_segment = True
                segment = segment.reshape(-1, 2)
                self.draw_polygon(segment,
                                  color=color,
                                  edge_color=edge_color,
                                  alpha=alpha)
        else:
            # TODO: Use Path/PathPatch to draw vector graphics:
            # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
            rgba = np.zeros(shape2d + (4, ), dtype='float32')
            rgba[:, :, :3] = color
            rgba[:, :, 3] = (mask.mask == 1).astype('float32') * alpha
            has_valid_segment = True
            self.output.ax.imshow(rgba,
                                  extent=(0, self.output.width,
                                          self.output.height, 0))

        if text is not None and has_valid_segment:
            # TODO sometimes drawn on wrong objects. the heuristics here can
            # improve.
            lighter_color = self._change_color_brightness(
                color, brightness_factor=0.7)
            _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(
                binary_mask, 8)
            largest_component_id = np.argmax(stats[1:, -1]) + 1

            # draw text on the largest component, as well as other very
            # large components.
            for cid in range(1, _num_cc):
                if cid == largest_component_id or stats[
                        cid, -1] > _LARGE_MASK_AREA_THRESH:
                    # median is more stable than centroid
                    # center = centroids[largest_component_id]
                    center = np.median((cc_labels == cid).nonzero(),
                                       axis=1)[::-1]
                    self.draw_text(text, center, color=lighter_color)
        return self.output

    def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
        """
        Args:
            segment: numpy array of shape Nx2, containing all the points in
                the polygon.
            color: color of the polygon. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            edge_color: color of the polygon edges. Refer to
                `matplotlib.colors` for a full list of formats that are
                accepted. If not provided, a darker shade of the polygon
                color will be used instead.
            alpha (float): blending coefficient. Smaller values lead to more
                transparent masks.

        Returns:
            output (VisImage): image object with polygon drawn.
        """
        if edge_color is None:
            # make edge color darker than the polygon color
            if alpha > 0.8:
                edge_color = self._change_color_brightness(
                    color, brightness_factor=-0.7)
            else:
                edge_color = color
        edge_color = mplc.to_rgb(edge_color) + (1, )

        polygon = mpl.patches.Polygon(
            segment,
            fill=True,
            facecolor=mplc.to_rgb(color) + (alpha, ),
            edgecolor=edge_color,
            linewidth=max(self._default_font_size // 15 * self.output.scale,
                          1),
        )
        self.output.ax.add_patch(polygon)
        return self.output

    """
    Internal methods:
    """

    def _jitter(self, color):
        """Randomly modifies the given color to produce a slightly different
        color than the color given.

        Args:
            color (tuple[double]): a tuple of 3 elements, containing the RGB
                values of the color picked. The values in the list are in
                the [0.0, 1.0] range.

        Returns:
            jittered_color (tuple[double]): a tuple of 3 elements,
                containing the RGB values of the color after being jittered.
                The values in the list are in the [0.0, 1.0] range.
        """
        color = mplc.to_rgb(color)
        vec = np.random.rand(3)
        # better to do it in another color space
        vec = vec / np.linalg.norm(vec) * 0.5
        res = np.clip(vec + color, 0, 1)
        return tuple(res)

    def _create_grayscale_image(self, mask=None):
        """Create a grayscale version of the original image.

        The colors in the masked area, if given, will be kept.
        """
        img_bw = self.img.astype('f4').mean(axis=2)
        img_bw = np.stack([img_bw] * 3, axis=2)
        if mask is not None:
            img_bw[mask] = self.img[mask]
        return img_bw

    def _change_color_brightness(self, color, brightness_factor):
        """Depending on the brightness_factor, gives a lighter or darker
        color, i.e. a color with less or more saturation than the original
        color.

        Args:
            color: color of the polygon. Refer to `matplotlib.colors` for a
                full list of formats that are accepted.
            brightness_factor (float): a value in the [-1.0, 1.0] range. A
                lightness factor of 0 will correspond to no change, a factor
                in the [-1.0, 0) range will result in a darker color and a
                factor in the (0, 1.0] range will result in a lighter color.

        Returns:
            modified_color (tuple[double]): a tuple containing the RGB
                values of the modified color. Each value in the tuple is in
                the [0.0, 1.0] range.
        """
        assert brightness_factor >= -1.0 and brightness_factor <= 1.0
        color = mplc.to_rgb(color)
        polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
        modified_lightness = polygon_color[1] + (brightness_factor *
                                                 polygon_color[1])
        modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
        modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
        modified_color = colorsys.hls_to_rgb(polygon_color[0],
                                             modified_lightness,
                                             polygon_color[2])
        return modified_color

    def _convert_boxes(self, boxes):
        """Convert different formats of boxes to an NxB array, where B = 4
        or 5 is the box dimension."""
        if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
            return boxes.tensor.detach().numpy()
        else:
            return np.asarray(boxes)

    def _convert_masks(self, masks_or_polygons):
        """Convert different formats of masks or polygons to a tuple of
        masks and polygons.

        Returns:
            list[GenericMask]:
        """
        m = masks_or_polygons
        if isinstance(m, PolygonMasks):
            m = m.polygons
        if isinstance(m, BitMasks):
            m = m.tensor.numpy()
        if isinstance(m, torch.Tensor):
            m = m.numpy()
        ret = []
        for x in m:
            if isinstance(x, GenericMask):
                ret.append(x)
            else:
                ret.append(
                    GenericMask(x, self.output.height, self.output.width))
        return ret

    def _convert_keypoints(self, keypoints):
        if isinstance(keypoints, Keypoints):
            keypoints = keypoints.tensor
        keypoints = np.asarray(keypoints)
        return keypoints

    def get_output(self):
        """
        Returns:
            output (VisImage): the image output containing the
                visualizations added to the image.
        """
        return self.output
file_length: 41,496
avg_line_length: 41.343878
max_line_length: 100
extension_type: py
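A minimal usage sketch of the `Visualizer` class above (not part of the repo). It assumes detectron2 and OpenCV are installed; `demo.jpg`, the box coordinates, and the 'person' label are placeholder values.

# Hypothetical usage sketch; 'demo.jpg' and all drawn values are placeholders.
import cv2
from detectron2.data import MetadataCatalog

img_bgr = cv2.imread('demo.jpg')
img_rgb = img_bgr[:, :, ::-1]  # Visualizer expects RGB in [0, 255]

viz = Visualizer(img_rgb, metadata=MetadataCatalog.get('coco_2017_val'))
viz.draw_box([20, 20, 120, 160], edge_color='r')  # XYXY_ABS box
viz.draw_text('person', (20, 20), color='r')      # label at the box corner
out = viz.get_output().get_image()                # (H, W, 3) uint8 RGB
cv2.imwrite('demo_out.jpg', out[:, :, ::-1])      # convert back to BGR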
repo: OpenPSG
file: OpenPSG-main/openpsg/utils/vis_tools/datasets.py
from pathlib import Path

from detectron2.data import DatasetCatalog, MetadataCatalog

from .preprocess import load_json
from .viz import get_colormap

data_dir = Path('data')

# COCO
coco_dir = data_dir / 'coco'
# coco_img_train_dir = coco_dir / 'train2017'
# coco_img_val_dir = coco_dir / 'val2017'
coco_detectron_dir = coco_dir / 'detectron'

# VG
vg_dir = data_dir / 'vg'
vg_img_dir = vg_dir / 'VG_100K'
vg_detectron_dir = vg_dir / 'detectron'

# VRR
vrr_dir = data_dir / 'vrr_vg'
vrr_img_dir = vg_img_dir
vrr_detectron_dir = vrr_dir / 'detectron'

# GQA
gqa_dir = data_dir / 'gqa'
gqa_img_dir = gqa_dir / 'images'
gqa_detectron_dir = gqa_dir / 'detectron'


def init_coco_panoptic_dataset():
    def load_coco_train():
        return load_json(coco_detectron_dir / 'train_data.json')

    def load_coco_val():
        return load_json(coco_detectron_dir / 'val_data.json')

    DatasetCatalog.register('coco_train', load_coco_train)
    DatasetCatalog.register('coco_val', load_coco_val)

    thing_cats = load_json(coco_detectron_dir / 'thing_categories.json')
    stuff_cats = load_json(coco_detectron_dir / 'stuff_categories.json')

    for name in ['coco_train', 'coco_val']:
        metadata = MetadataCatalog.get(name)
        metadata.thing_classes = thing_cats
        metadata.stuff_classes = stuff_cats
        metadata.thing_colors = get_colormap(len(thing_cats))
        metadata.stuff_colors = get_colormap(len(stuff_cats))


def init_vg_dataset():
    def load_vg_train():
        return load_json(vg_detectron_dir / 'train_data.json')

    def load_vg_val():
        return load_json(vg_detectron_dir / 'val_data.json')

    DatasetCatalog.register('vg_train', load_vg_train)
    DatasetCatalog.register('vg_val', load_vg_val)

    obj_cats = load_json(vg_detectron_dir / 'object_categories.json')
    rel_cats = load_json(vg_detectron_dir / 'relation_categories.json')
    obj_colormap = get_colormap(len(obj_cats))

    for name in ['vg_train', 'vg_val']:
        metadata = MetadataCatalog.get(name)
        metadata.thing_classes = obj_cats
        metadata.relation_classes = rel_cats
        metadata.thing_colors = obj_colormap
        metadata.thing_dataset_id_to_contiguous_id = {
            i: i
            for i in range(len(obj_cats))
        }


def init_vrr_vg_dataset():
    # FIXME Make train / val split?
    def load_vrr_vg():
        return load_json(vrr_detectron_dir / 'data.json')

    DatasetCatalog.register('vrr_vg', load_vrr_vg)

    obj_cats = load_json(vrr_detectron_dir / 'object_categories.json')
    rel_cats = load_json(vrr_detectron_dir / 'relation_categories.json')
    obj_colormap = get_colormap(len(obj_cats))

    metadata = MetadataCatalog.get('vrr_vg')
    metadata.thing_classes = obj_cats
    metadata.relation_classes = rel_cats
    # Set a fixed alternating colormap
    metadata.thing_colors = obj_colormap
    metadata.thing_dataset_id_to_contiguous_id = {
        i: i
        for i in range(len(obj_cats))
    }


def init_gqa_dataset():
    def load_gqa_train():
        return load_json(gqa_detectron_dir / 'train_data.json')

    def load_gqa_val():
        return load_json(gqa_detectron_dir / 'val_data.json')

    DatasetCatalog.register('gqa_train', load_gqa_train)
    DatasetCatalog.register('gqa_val', load_gqa_val)

    obj_cats = load_json(gqa_detectron_dir / 'object_categories.json')
    rel_cats = load_json(gqa_detectron_dir / 'relation_categories.json')
    obj_colormap = get_colormap(len(obj_cats))

    for name in ['gqa_train', 'gqa_val']:
        metadata = MetadataCatalog.get(name)
        metadata.thing_classes = obj_cats
        metadata.relation_classes = rel_cats
        metadata.thing_colors = obj_colormap
        metadata.thing_dataset_id_to_contiguous_id = {
            i: i
            for i in range(len(obj_cats))
        }
file_length: 3,835
avg_line_length: 29.444444
max_line_length: 72
extension_type: py
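A usage sketch of the registration helpers above (not part of the repo). It assumes the Detectron2-format JSONs already exist under data/coco/detectron/.

# Hypothetical usage sketch; assumes data/coco/detectron/*.json exist.
from detectron2.data import DatasetCatalog, MetadataCatalog

init_coco_panoptic_dataset()

dicts = DatasetCatalog.get('coco_val')  # list[dict], loaded via load_json
meta = MetadataCatalog.get('coco_val')
print(len(dicts), 'images')
print(meta.thing_classes[:5], meta.stuff_classes[:5])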
repo: OpenPSG
file: OpenPSG-main/openpsg/utils/vis_tools/viz.py
from pathlib import Path
from typing import Any, Dict, List, Tuple

import cv2
import graphviz
import numpy as np
from detectron2.data import Metadata
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ScaleTransform
from detectron2.utils.colormap import colormap
from detectron2.utils.visualizer import VisImage
from panopticapi.utils import rgb2id

from .. import Visualizer
from .preprocess import x1y1wh_to_xyxy


def get_colormap(num_colors: int):
    return (np.resize(colormap(), (num_colors, 3))).tolist()


def adjust_text_color(color: Tuple[float, float, float],
                      viz: Visualizer) -> Tuple[float, float, float]:
    color = viz._change_color_brightness(color, brightness_factor=0.7)
    color = np.maximum(color, 0.2)
    color[np.argmax(color)] = max(0.8, np.max(color))

    return color


def draw_text(
    viz_img: VisImage = None,
    text: str = None,
    x: float = None,
    y: float = None,
    color: Tuple[float, float, float] = [0, 0, 0],
    size: float = 10,
    padding: float = 5,
    box_color: str = 'black',
    font: str = None,
) -> float:
    text_obj = viz_img.ax.text(
        x,
        y,
        text,
        size=size,
        # family="sans-serif",
        bbox={
            'facecolor': box_color,
            'alpha': 0.8,
            'pad': padding,
            'edgecolor': 'none',
        },
        verticalalignment='top',
        horizontalalignment='left',
        color=color,
        zorder=10,
        rotation=0,
    )
    viz_img.get_image()
    text_dims = text_obj.get_bbox_patch().get_extents()

    return text_dims.width


def viz_annotations(
    data: Dict[str, Any],
    data_dir: Path,
    metadata: Metadata,
    return_graph: bool = True,
    graph_size: str = '10,10',
    filter_out_left_right: bool = False,
):
    """
    Parameters
    ----------
    data : Dict[str, Any]
        In standard Detectron2 format. Scene graph annotations should be
        stored in the `relations` key, which contains a list of relations,
        each being a Tuple[subject_idx, object_idx, relation_id]
    data_dir : Path
    metadata : Metadata
        Should contain object / relation class labels, as well as color maps
    return_graph : bool, optional
        By default True.
    graph_size : str, optional
        By default "10,10".

    Returns
    -------
    Tuple[np.ndarray, graphviz.Digraph, str]
        RGB image (H, W, C) in [0, 255], the scene graph, and its text form.
        If `return_graph` is False, only the RGB image is returned.
    """
    # Viz instance annotations
    img = read_image(data_dir / data['file_name'], format='RGB')
    viz = Visualizer(
        img,
        metadata=metadata,
        # instance_mode=instance_mode,
    )
    viz_out = viz.draw_dataset_dict(data)
    viz_img = viz_out.get_image()

    # Viz scene graph
    if return_graph:
        g = graphviz.Digraph()
        g.attr('node', style='filled', shape='record')
        g.attr(size=graph_size)

        annos = data['annotations']

        # Draw nodes (objects)
        for idx, obj in enumerate(annos):
            g.node(f'obj_{idx}',
                   metadata.thing_classes[obj['category_id']],
                   color='orange')

        # Show scene graph in text format
        g_text = ''

        # Draw edges (relations)
        for idx, rel in enumerate(sorted(data['relations'])):
            s_idx, o_idx, rel_id = rel
            # FIXME What about stuff classes?
            s_name = metadata.thing_classes[annos[s_idx]['category_id']]
            o_name = metadata.thing_classes[annos[o_idx]['category_id']]
            rel_name = metadata.relation_classes[rel_id]

            if filter_out_left_right and rel_name in [
                    'to the left of',
                    'to the right of',
            ]:
                continue

            g_text += f'({s_name} -> {rel_name} -> {o_name})\n'

            # NOTE Draw w/o explicit node for each edge
            # g.edge(str(s_idx), str(o_idx),
            #        metadata.relation_classes[rel_id])

            # Draw with explicit node for each edge
            g.node(f'rel_{idx}', rel_name, color='lightblue2')
            g.edge(f'obj_{s_idx}', f'rel_{idx}')
            g.edge(f'rel_{idx}', f'obj_{o_idx}')

        return viz_img, g, g_text
    else:
        return viz_img


def viz_annotations_alt(
    data: Dict[str, Any],
    data_dir: Path,
    obj_cats: List[str],
    rel_cats: List[str],
    type: str = 'boxes',  # One of {'boxes', 'masks'}
    show_annos: bool = True,
    rel_ids_to_keep: List[int] = None,
    rel_ids_to_filter: List[int] = None,
    n_rels: int = None,
    resize: Tuple[int, int] = None,
    font: str = None,
):
    ch_font_name = 'Source Han Serif SC'

    # Viz instance annotations #############################################
    img = read_image(data_dir / data['file_name'], format='RGB')
    if resize:
        img = cv2.resize(img, resize)

    # Visualize COCO Annotations ###########################################
    if type == 'masks':
        # Load panoptic segmentation
        seg_map = read_image(data['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)

        masks = []
        labels_coco = []
        for i, s in enumerate(data['segments_info']):
            label = (
                obj_cats[s['category_id']] if s['isthing']
                # coco things and stuff are concatenated with each other
                else obj_cats[s['category_id'] + 80])
            labels_coco.append(label)
            masks.append(seg_map == s['id'])
        # Prepend instance id
        labels_coco = [f'{i}-{l}' for i, l in enumerate(labels_coco)]

        # Choose colors for each instance in coco
        colormap_coco = get_colormap(len(data['segments_info']))
        colormap_coco = (np.array(colormap_coco) / 255).tolist()

        # Draw coco annotations
        viz = Visualizer(img)
        if show_annos:
            viz.overlay_instances(
                labels=labels_coco,
                masks=masks,
                assigned_colors=colormap_coco,
            )
            viz_img = viz.get_output().get_image()
        else:
            viz_img = img

    elif type == 'boxes':
        boxes = []
        for a in data['annotations']:
            # Depending on bbox mode
            if a['bbox_mode'] == 1:
                box = x1y1wh_to_xyxy(a['bbox'])
            else:
                box = a['bbox']

            # If resizing image
            if resize:
                transform = ScaleTransform(
                    data['height'],
                    data['width'],
                    resize[1],
                    resize[0],
                )
                box = transform.apply_box(np.array(box))[0].tolist()

            boxes.append(box)
        boxes = np.array(boxes)

        # Choose colors for each instance in coco
        colormap_coco = get_colormap(len(data['annotations']))
        colormap_coco = (np.array(colormap_coco) / 255).tolist()

        labels_coco = [
            obj_cats[a['category_id']] for a in data['annotations']
        ]

        # Draw coco annotations
        viz = Visualizer(img)
        viz.overlay_instances(
            labels=labels_coco,
            boxes=boxes,
            assigned_colors=colormap_coco,
        )
        viz_img = viz.get_output().get_image()

    # Draw relationship triplets ###########################################
    # rel_ids_to_filter = [267, 268]

    # If using custom number of relations
    if n_rels is not None:
        pass
    elif not rel_ids_to_keep and not rel_ids_to_filter:
        n_rels = len(data['relations'])
    elif rel_ids_to_keep:
        n_rels = len(
            [r for r in data['relations'] if r[2] in rel_ids_to_keep])
    elif rel_ids_to_filter:
        n_rels = len(
            [r for r in data['relations'] if r[2] not in rel_ids_to_filter])

    top_padding = 20
    bottom_padding = 20
    left_padding = 20
    text_size = 10
    text_padding = 5
    text_height = text_size + 2 * text_padding
    row_padding = 10

    height = (top_padding + bottom_padding + n_rels *
              (text_height + row_padding) - row_padding)
    width = resize[0] if resize else data['width']

    curr_x = left_padding
    curr_y = top_padding

    # Adjust colormaps
    colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]

    viz_graph = VisImage(np.full((height, width, 3), 255))

    for i, r in enumerate(data['relations']):
        s_idx, o_idx, rel_id = r

        # Filter for specific relations
        if rel_ids_to_keep:
            if rel_id not in rel_ids_to_keep:
                continue
        elif rel_ids_to_filter:
            if rel_id in rel_ids_to_filter:
                continue

        s_label = labels_coco[s_idx]
        o_label = labels_coco[o_idx]
        rel_label = rel_cats[rel_id]

        # Draw index
        text_width = draw_text(
            viz_img=viz_graph, text=f'{i + 1}.', x=curr_x, y=curr_y,
            size=text_size, padding=text_padding, box_color='white',
            # font=font,
        )
        curr_x += text_width

        # Special case for Chinese predicates
        if '…' in rel_label:
            rel_a, rel_b = rel_label.split('…')

            # Draw subject text
            text_width = draw_text(
                viz_img=viz_graph, text=s_label, x=curr_x, y=curr_y,
                color=colormap_coco[s_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_a, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width

            # Draw object text
            text_width = draw_text(
                viz_img=viz_graph, text=o_label, x=curr_x, y=curr_y,
                color=colormap_coco[o_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_b, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width
        else:
            # Draw subject text
            text_width = draw_text(
                viz_img=viz_graph, text=s_label, x=curr_x, y=curr_y,
                color=colormap_coco[s_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_label, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width

            # Draw object text
            text_width = draw_text(
                viz_img=viz_graph, text=o_label, x=curr_x, y=curr_y,
                color=colormap_coco[o_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

        curr_x = left_padding
        curr_y += text_height + row_padding

    viz_graph = viz_graph.get_image()

    return np.vstack([viz_img, viz_graph])


def viz_annotations_v1(
    data: Dict[str, Any],
    data_dir: Path,
    obj_cats: List[str],
    rel_cats: List[str],
    type: str = 'boxes',  # One of {'boxes', 'masks'}
    show_annos: bool = True,
    rel_ids_to_keep: List[int] = None,
    rel_ids_to_filter: List[int] = None,
    n_rels: int = None,
    resize: Tuple[int, int] = None,
    font: str = None,
):
    ch_font_name = 'Source Han Serif SC'

    # Viz instance annotations #############################################
    img = read_image(data_dir / data['file_name'], format='RGB')
    if resize:
        img = cv2.resize(img, resize)

    # Visualize COCO Annotations ###########################################
    if type == 'masks':
        # Load panoptic segmentation
        seg_map = read_image(data['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)

        masks = []
        labels_coco = []
        for i, s in enumerate(data['segments_info']):
            label = (
                obj_cats[s['category_id']] if s['isthing']
                # coco things and stuff are concatenated with each other
                else obj_cats[s['category_id'] + 80])
            labels_coco.append(label)
            masks.append(seg_map == s['id'])

        # Choose colors for each instance in coco
        colormap_coco = get_colormap(len(data['segments_info']))
        colormap_coco = (np.array(colormap_coco) / 255).tolist()

        # Draw coco annotations
        viz = Visualizer(img)
        if show_annos:
            viz.overlay_instances(
                labels=labels_coco,
                masks=masks,
                assigned_colors=colormap_coco,
            )
            viz_img = viz.get_output().get_image()
        else:
            viz_img = img

    elif type == 'boxes':
        boxes = []
        for a in data['annotations']:
            # Depending on bbox mode
            if a['bbox_mode'] == 1:
                box = x1y1wh_to_xyxy(a['bbox'])
            else:
                box = a['bbox']

            # If resizing image
            if resize:
                transform = ScaleTransform(
                    data['height'],
                    data['width'],
                    resize[1],
                    resize[0],
                )
                box = transform.apply_box(np.array(box))[0].tolist()

            boxes.append(box)
        boxes = np.array(boxes)

        # Choose colors for each instance in coco
        colormap_coco = get_colormap(len(data['annotations']))
        colormap_coco = (np.array(colormap_coco) / 255).tolist()

        labels_coco = [
            obj_cats[a['category_id']] for a in data['annotations']
        ]

        # Draw coco annotations
        viz = Visualizer(img)
        viz.overlay_instances(
            labels=labels_coco,
            boxes=boxes,
            assigned_colors=colormap_coco,
        )
        viz_img = viz.get_output().get_image()

    # Draw relationship triplets ###########################################
    # rel_ids_to_filter = [267, 268]

    # If using custom number of relations
    if n_rels is not None:
        pass
    elif not rel_ids_to_keep and not rel_ids_to_filter:
        n_rels = len(data['relations'])
    elif rel_ids_to_keep:
        n_rels = len(
            [r for r in data['relations'] if r[2] in rel_ids_to_keep])
    elif rel_ids_to_filter:
        n_rels = len(
            [r for r in data['relations'] if r[2] not in rel_ids_to_filter])

    top_padding = 20
    bottom_padding = 20
    left_padding = 20
    text_size = 10
    text_padding = 5
    text_height = text_size + 2 * text_padding
    row_padding = 10

    height = (top_padding + bottom_padding + n_rels *
              (text_height + row_padding) - row_padding)
    width = resize[0] if resize else data['width']

    curr_x = left_padding
    curr_y = top_padding

    # Adjust colormaps
    colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]

    viz_graph = VisImage(np.full((height, width, 3), 255))

    for i, r in enumerate(data['relations']):
        s_idx, o_idx, rel_id = r

        # Filter for specific relations
        if rel_ids_to_keep:
            if rel_id not in rel_ids_to_keep:
                continue
        elif rel_ids_to_filter:
            if rel_id in rel_ids_to_filter:
                continue

        s_label = labels_coco[s_idx]
        o_label = labels_coco[o_idx]
        rel_label = rel_cats[rel_id]

        # Special case for Chinese predicates
        if '…' in rel_label:
            rel_a, rel_b = rel_label.split('…')

            # Draw subject text
            text_width = draw_text(
                viz_img=viz_graph, text=s_label, x=curr_x, y=curr_y,
                color=colormap_coco[s_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_a, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width

            # Draw object text
            text_width = draw_text(
                viz_img=viz_graph, text=o_label, x=curr_x, y=curr_y,
                color=colormap_coco[o_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_b, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width
        else:
            # Draw subject text
            text_width = draw_text(
                viz_img=viz_graph, text=s_label, x=curr_x, y=curr_y,
                color=colormap_coco[s_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_label, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width

            # Draw object text
            text_width = draw_text(
                viz_img=viz_graph, text=o_label, x=curr_x, y=curr_y,
                color=colormap_coco[o_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

        curr_x = left_padding
        curr_y += text_height + row_padding

    viz_graph = viz_graph.get_image()

    return np.vstack([viz_img, viz_graph])


def viz_annotations_psg(
    data: Dict[str, Any],
    data_dir: Path,
    obj_cats: List[str],
    rel_cats: List[str],
    type: str = 'boxes',  # One of {'boxes', 'masks'}
    show_annos: bool = True,
    rel_ids_to_keep: List[int] = None,
    rel_ids_to_filter: List[int] = None,
    n_rels: int = None,
    resize: Tuple[int, int] = None,
    font: str = None,
):
    ch_font_name = 'Source Han Serif SC'

    # Viz instance annotations #############################################
    img = read_image(data_dir / data['file_name'], format='RGB')
    if resize:
        img = cv2.resize(img, resize)

    # Visualize COCO Annotations ###########################################
    if type == 'masks':
        # Load panoptic segmentation
        seg_map = read_image(data_dir / data['pan_seg_file_name'],
                             format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)

        masks = []
        labels_coco = []
        for i, s in enumerate(data['segments_info']):
            label = (
                obj_cats[s['category_id']]
                # if s["isthing"]
                # # coco things and stuff are concatenated with each other
                # else obj_cats[s["category_id"] + 80]
            )
            labels_coco.append(label)
            masks.append(seg_map == s['id'])

        # Choose colors for each instance in coco
        colormap_coco = get_colormap(len(data['segments_info']))
        colormap_coco = (np.array(colormap_coco) / 255).tolist()

        # Draw coco annotations
        viz = Visualizer(img)
        if show_annos:
            viz.overlay_instances(
                labels=labels_coco,
                masks=masks,
                assigned_colors=colormap_coco,
            )
            viz_img = viz.get_output().get_image()
        else:
            viz_img = img

    elif type == 'boxes':
        boxes = []
        for a in data['annotations']:
            # Depending on bbox mode
            if a['bbox_mode'] == 1:
                box = x1y1wh_to_xyxy(a['bbox'])
            else:
                box = a['bbox']

            # If resizing image
            if resize:
                transform = ScaleTransform(
                    data['height'],
                    data['width'],
                    resize[1],
                    resize[0],
                )
                box = transform.apply_box(np.array(box))[0].tolist()

            boxes.append(box)
        boxes = np.array(boxes)

        # Choose colors for each instance in coco
        colormap_coco = get_colormap(len(data['annotations']))
        colormap_coco = (np.array(colormap_coco) / 255).tolist()

        labels_coco = [
            obj_cats[a['category_id']] for a in data['annotations']
        ]

        # Draw coco annotations
        viz = Visualizer(img)
        viz.overlay_instances(
            labels=labels_coco,
            boxes=boxes,
            assigned_colors=colormap_coco,
        )
        viz_img = viz.get_output().get_image()

    # Draw relationship triplets ###########################################
    # rel_ids_to_filter = [267, 268]

    # If using custom number of relations
    if n_rels is not None:
        pass
    elif not rel_ids_to_keep and not rel_ids_to_filter:
        n_rels = len(data['relations'])
    elif rel_ids_to_keep:
        n_rels = len(
            [r for r in data['relations'] if r[2] in rel_ids_to_keep])
    elif rel_ids_to_filter:
        n_rels = len(
            [r for r in data['relations'] if r[2] not in rel_ids_to_filter])

    top_padding = 20
    bottom_padding = 20
    left_padding = 20
    text_size = 10
    text_padding = 5
    text_height = text_size + 2 * text_padding
    row_padding = 10

    height = (top_padding + bottom_padding + n_rels *
              (text_height + row_padding) - row_padding)
    width = resize[0] if resize else data['width']

    curr_x = left_padding
    curr_y = top_padding

    # Adjust colormaps
    colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]

    viz_graph = VisImage(np.full((height, width, 3), 255))

    for i, r in enumerate(data['relations']):
        s_idx, o_idx, rel_id = r

        # Filter for specific relations
        if rel_ids_to_keep:
            if rel_id not in rel_ids_to_keep:
                continue
        elif rel_ids_to_filter:
            if rel_id in rel_ids_to_filter:
                continue

        s_label = labels_coco[s_idx]
        o_label = labels_coco[o_idx]
        rel_label = rel_cats[rel_id]

        # Special case for Chinese predicates
        if '…' in rel_label:
            rel_a, rel_b = rel_label.split('…')

            # Draw subject text
            text_width = draw_text(
                viz_img=viz_graph, text=s_label, x=curr_x, y=curr_y,
                color=colormap_coco[s_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_a, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width

            # Draw object text
            text_width = draw_text(
                viz_img=viz_graph, text=o_label, x=curr_x, y=curr_y,
                color=colormap_coco[o_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_b, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width
        else:
            # Draw subject text
            text_width = draw_text(
                viz_img=viz_graph, text=s_label, x=curr_x, y=curr_y,
                color=colormap_coco[s_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

            # Draw relation text
            text_width = draw_text(
                viz_img=viz_graph, text=rel_label, x=curr_x, y=curr_y,
                size=text_size, padding=text_padding, box_color='gainsboro',
                # font=font,
            )
            curr_x += text_width

            # Draw object text
            text_width = draw_text(
                viz_img=viz_graph, text=o_label, x=curr_x, y=curr_y,
                color=colormap_coco[o_idx], size=text_size,
                padding=text_padding,
                # font=font,
            )
            curr_x += text_width

        curr_x = left_padding
        curr_y += text_height + row_padding

    viz_graph = viz_graph.get_image()

    return np.vstack([viz_img, viz_graph])
file_length: 26,878
avg_line_length: 28.408096
max_line_length: 80
extension_type: py
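A usage sketch of `viz_annotations` above (not part of the repo). It assumes 'vg_val' was registered by init_vg_dataset() and that images exist under data/vg/VG_100K/; the output paths are placeholders.

# Hypothetical usage sketch; dataset name and paths are placeholders.
from pathlib import Path
from detectron2.data import DatasetCatalog, MetadataCatalog

data = DatasetCatalog.get('vg_val')[0]  # one record in Detectron2 format
img, graph, graph_text = viz_annotations(
    data,
    data_dir=Path('data/vg/VG_100K'),
    metadata=MetadataCatalog.get('vg_val'),
    return_graph=True,
)
print(graph_text)                          # "(subject -> predicate -> object)" lines
graph.render('scene_graph', format='png')  # graphviz.Digraph.render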
repo: OpenPSG
file: OpenPSG-main/openpsg/utils/vis_tools/__init__.py
from .datasets import (init_coco_panoptic_dataset, init_vg_dataset,
                       init_vrr_vg_dataset, init_gqa_dataset)
from .detectron_viz import Visualizer
from .postprocess import (process_gqa_and_coco, process_vrr_and_coco,
                          process_vg_and_coco, compute_gqa_coco_overlap,
                          psg_to_kaihua)
from .preprocess import (process_vg_150_to_detectron,
                         process_vrr_vg_to_detectron,
                         process_coco_panoptic_to_detectron,
                         process_gqa_to_detectron)

__all__ = [
    'init_coco_panoptic_dataset', 'init_vg_dataset', 'init_vrr_vg_dataset',
    'init_gqa_dataset', 'Visualizer', 'process_gqa_and_coco',
    'process_vrr_and_coco', 'process_vg_and_coco',
    'compute_gqa_coco_overlap', 'psg_to_kaihua',
    'process_vg_150_to_detectron', 'process_vrr_vg_to_detectron',
    'process_coco_panoptic_to_detectron', 'process_gqa_to_detectron'
]
file_length: 812
avg_line_length: 53.2
max_line_length: 143
extension_type: py
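Given these re-exports, downstream code can pull the helpers from the package in one line; a sketch (assuming `openpsg` is importable from the repo root):

from openpsg.utils.vis_tools import Visualizer, init_vg_dataset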
repo: OpenPSG
file: OpenPSG-main/openpsg/utils/vis_tools/preprocess.py
import json
from pathlib import Path
from typing import Tuple

import h5py
import numpy as np
import xmltodict
from detectron2.data.transforms import ScaleTransform
from tqdm import tqdm


def x1y1wh_to_xyxy(xywh):
    """Convert [x1 y1 w h] box format to [x1 y1 x2 y2] format."""
    if isinstance(xywh, (list, tuple)):
        # Single box given as a list of coordinates
        assert len(xywh) == 4
        x1, y1 = xywh[0], xywh[1]
        x2 = x1 + np.maximum(0.0, xywh[2] - 1.0)
        y2 = y1 + np.maximum(0.0, xywh[3] - 1.0)
        xyxy = [x1, y1, x2, y2]
        return [int(c) for c in xyxy]
    elif isinstance(xywh, np.ndarray):
        # Multiple boxes given as a 2D ndarray
        return np.hstack(
            (xywh[:, 0:2], xywh[:, 0:2] + np.maximum(0, xywh[:, 2:4] - 1)))
    else:
        raise TypeError(
            'Argument xywh must be a list, tuple, or numpy array.')


def xyxy_to_x1y1wh(
        xyxy: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
    """Convert [x1 y1 x2 y2] box format to [x1 y1 w h] format."""
    x1, y1, x2, y2 = xyxy
    w = x2 - x1
    h = y2 - y1
    return [x1, y1, w, h]


def xcycwh_to_xyxy(
        xywh: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
    """Convert [xc yc w h] box format to [x1 y1 x2 y2] format."""
    xc, yc, w, h = xywh
    x1 = xc - w / 2
    y1 = yc - h / 2
    x2 = xc + w / 2
    y2 = yc + h / 2
    xyxy = [x1, y1, x2, y2]
    return [int(c) for c in xyxy]


def xyxy_to_xcycwh(
        xyxy: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
    """Convert [x1 y1 x2 y2] box format to [xc yc w h] format."""
    x1, y1, x2, y2 = xyxy
    w = x2 - x1
    h = y2 - y1
    xc = x1 + w / 2
    yc = y1 + h / 2
    xywh = [xc, yc, w, h]
    return [int(c) for c in xywh]


def segment_to_bbox(seg_mask: np.ndarray) -> Tuple[int, int, int, int]:
    """
    Parameters
    ----------
    seg_mask : np.ndarray
        Boolean mask containing segmented object, (H, W)

    Returns
    -------
    Tuple[int, int, int, int]
        [x1, y1, x2, y2]
    """
    # Indices of non-empty columns
    ind = np.nonzero(seg_mask.any(axis=0))[0]
    x1, x2 = ind[0], ind[-1]
    # Indices of non-empty rows
    ind = np.nonzero(seg_mask.any(axis=1))[0]
    y1, y2 = ind[0], ind[-1]

    bbox = [x1, y1, x2, y2]
    return bbox


def process_vg_bbox(og_height: int, og_width: int,
                    bbox: Tuple[int, int, int, int],
                    resize: int = 1024) -> Tuple[int, int, int, int]:
    """For the VG_150 dataset. Rescales the bbox coords back to the
    original.

    Parameters
    ----------
    og_height : int
        Original image height
    og_width : int
        Original image width
    bbox : Tuple[int, int, int, int]
        In XYXY format
    resize : int, optional
        The dim that the image was resized to in VG_150, by default 1024

    Returns
    -------
    Tuple[int, int, int, int]
        Original bbox in XYXY format
    """
    if og_height > og_width:
        height = resize
        width = int(resize / og_height * og_width)
    else:
        width = resize
        height = int(resize / og_width * og_height)

    transform = ScaleTransform(height, width, og_height, og_width)
    og_bbox = transform.apply_box(np.array(bbox))[0].tolist()

    return og_bbox


def resize_bbox(old_height: int, old_width: int,
                bbox: Tuple[int, int, int, int], resize: int):
    if old_height > old_width:
        new_height = resize
        new_width = int(resize / old_height * old_width)
    else:
        new_width = resize
        new_height = int(resize / old_width * old_height)

    transform = ScaleTransform(old_height, old_width, new_height, new_width)
    new_bbox = transform.apply_box(np.array(bbox))[0].tolist()

    return [int(b) for b in new_bbox]


def load_json(path: Path):
    with path.open() as f:
        data = json.load(f)
    return data


def save_json(obj, path: Path):
    with path.open('w') as f:
        json.dump(obj, f)


def load_xml(path: Path):
    with path.open() as f:
        data = xmltodict.parse(f.read())
    return data


def process_vg_150_to_detectron(
    img_json_path: Path,
    metadata_json_path: Path,
    data_path: Path,
    output_dir: Path,
    val_split_idx: int = 75651,
):
    img_data = load_json(img_json_path)
    vg_metadata = load_json(metadata_json_path)
    print(f'{len(img_data)} Annotations Found.')

    # Extract VG Categories ################################################
    obj_cats = sorted(list(vg_metadata['idx_to_label'].values()))
    attr_cats = sorted(list(vg_metadata['idx_to_attribute'].values()))
    rel_cats = sorted(list(vg_metadata['idx_to_predicate'].values()))

    print(f'{len(obj_cats)} Object Categories')
    print(f'{len(attr_cats)} Attribute Categories')
    print(f'{len(rel_cats)} Relation Categories')

    # Save categories to JSON file
    obj_cats_save_path = output_dir / 'object_categories.json'
    save_json(obj_cats, obj_cats_save_path)
    print(f'Object categories saved to {obj_cats_save_path}')

    attr_cats_save_path = output_dir / 'attribute_categories.json'
    save_json(attr_cats, attr_cats_save_path)
    print(f'Attribute categories saved to {attr_cats_save_path}')

    rel_cats_save_path = output_dir / 'relation_categories.json'
    save_json(rel_cats, rel_cats_save_path)
    print(f'Relation categories saved to {rel_cats_save_path}')

    obj_to_id = {obj: i for i, obj in enumerate(obj_cats)}
    attr_to_id = {attr: i for i, attr in enumerate(attr_cats)}
    rel_to_id = {rel: i for i, rel in enumerate(rel_cats)}

    # Process to Detectron2 Format #########################################
    # Extract data from h5py
    with h5py.File(data_path, 'r') as f:
        img_to_first_box = f['img_to_first_box'][:]  # (N,)
        img_to_last_box = f['img_to_last_box'][:]  # (N,)
        img_to_first_rel = f['img_to_first_rel'][:]  # (N,)
        img_to_last_rel = f['img_to_last_rel'][:]  # (N,)
        attributes = f['attributes'][:]  # (N_b, 10)
        boxes_1024 = f['boxes_1024'][:]  # (N_b, 4)
        labels = f['labels'][:]  # (N_b, 1)
        relationships = f['relationships'][:]  # (N_r, 2)
        predicates = f['predicates'][:]  # (N_r, 1)
        split = f['split'][:]  # (N,)

    for name, (start_idx, end_idx) in zip(
            ['train_data', 'val_data'],
            [(0, val_split_idx), (val_split_idx, len(img_to_first_box))],
    ):
        output_dicts = []

        for img_idx in tqdm(range(start_idx, end_idx)):
            out = {}
            img = img_data[img_idx]

            # Store img info
            image_id = img['image_id']
            # FIXME Temp change
            # out["file_name"] = f"{image_id}.jpg"
            out['file_name'] = img['file_name']
            out['height'] = img['height']
            out['width'] = img['width']
            out['image_id'] = str(image_id)

            # Store bbox
            out['annotations'] = []
            # Keep an obj_id to idx mapping
            obj_id_to_idx = {}

            first_box_idx = img_to_first_box[img_idx]
            last_box_idx = img_to_last_box[img_idx]

            # Store per box annotations
            for i, box_idx in enumerate(
                    range(first_box_idx, last_box_idx + 1)):
                anno = {}

                # Store bbox coords
                bbox = boxes_1024[box_idx].tolist()
                # FIXME Take note of box format
                bbox = xcycwh_to_xyxy(bbox)
                bbox = [int(b) for b in bbox]
                # Transform to original coords
                bbox = process_vg_bbox(
                    img['height'],
                    img['width'],
                    bbox,
                )
                anno['bbox'] = bbox
                anno['bbox_mode'] = 0

                # Store obj id
                old_obj_id = labels[box_idx][0]
                obj_name = vg_metadata['idx_to_label'][str(old_obj_id)]
                obj_id = obj_to_id[obj_name]
                anno['category_id'] = obj_id

                # Store attributes
                anno['attribute_ids'] = []
                attrs = attributes[box_idx].tolist()
                for old_attr_id in attrs:
                    if old_attr_id != 0:
                        attr_name = vg_metadata['idx_to_attribute'][str(
                            old_attr_id)]
                        attr_id = attr_to_id[attr_name]
                        anno['attribute_ids'].append(attr_id)

                obj_id_to_idx[box_idx] = i
                out['annotations'].append(anno)

            # Store relations
            out['relations'] = []

            first_rel_idx = img_to_first_rel[img_idx]
            last_rel_idx = img_to_last_rel[img_idx]

            # If there exist relationships
            if first_rel_idx != -1 and last_rel_idx != -1:
                for rel_idx in range(first_rel_idx, last_rel_idx + 1):
                    old_rel_id = predicates[rel_idx][0]
                    rel_name = vg_metadata['idx_to_predicate'][str(
                        old_rel_id)]
                    rel_id = rel_to_id[rel_name]

                    s_idx = obj_id_to_idx[relationships[rel_idx][0]]
                    o_idx = obj_id_to_idx[relationships[rel_idx][1]]

                    out['relations'].append([s_idx, o_idx, rel_id])

            output_dicts.append(out)

        # Save data to a JSON file
        data_save_path = output_dir / f'{name}.json'
        print(f'{name} in Detectron2 format saved to {data_save_path}')
        save_json(output_dicts, data_save_path)


def process_vrr_vg_to_detectron(
    data_dir: Path,
    output_dir: Path,
):
    vrr_xmls = list(data_dir.glob('*.xml'))
    print(f'{len(vrr_xmls)} Annotations Found.')

    # Extract VRR Categories ###############################################
    obj_cats = set()
    attr_cats = set()
    rel_cats = set()

    for xml in tqdm(vrr_xmls):
        data = load_xml(xml)['annotation']

        for obj in data['object']:
            obj_cats.add(obj['name'])

            if 'attribute' in obj:
                attr = obj['attribute']
                if isinstance(attr, str):
                    attr_cats.add(attr)
                elif isinstance(attr, list):
                    attr_cats.update(attr)
                else:
                    raise Exception('Unknown attribute type!')

        relations = data['relation']
        if isinstance(relations, dict):
            rel_cats.add(relations['predicate'])
        elif isinstance(relations, list):
            rel_cats.update(r['predicate'] for r in relations)
        else:
            raise Exception('Unknown relation type!')

    obj_cats = sorted(list(obj_cats))
    attr_cats = sorted(list(attr_cats))
    rel_cats = sorted(list(rel_cats))

    print(f'{len(obj_cats)} Object Categories')
    print(f'{len(attr_cats)} Attribute Categories')
    print(f'{len(rel_cats)} Relation Categories')

    # Save categories to JSON file
    obj_cats_save_path = output_dir / 'object_categories.json'
    save_json(obj_cats, obj_cats_save_path)
    print(f'Object categories saved to {obj_cats_save_path}')

    attr_cats_save_path = output_dir / 'attribute_categories.json'
    save_json(attr_cats, attr_cats_save_path)
    print(f'Attribute categories saved to {attr_cats_save_path}')

    rel_cats_save_path = output_dir / 'relation_categories.json'
    save_json(rel_cats, rel_cats_save_path)
    print(f'Relation categories saved to {rel_cats_save_path}')

    obj_to_id = {obj: i for i, obj in enumerate(obj_cats)}
    attr_to_id = {attr: i for i, attr in enumerate(attr_cats)}
    rel_to_id = {rel: i for i, rel in enumerate(rel_cats)}

    # Process to Detectron2 Format #########################################
    # FIXME Just convert all xmls to json first?
    output_dicts = []

    for xml in tqdm(vrr_xmls):
        out = {}
        data = load_xml(xml)['annotation']

        out['file_name'] = data['filename']
        out['height'] = int(data['size']['height'])
        out['width'] = int(data['size']['width'])
        out['image_id'] = str(data['source']['image_id'])
        out['annotations'] = []
        out['relations'] = []

        # Keep an obj_id to idx mapping
        obj_id_to_idx = {}

        for i, obj in enumerate(data['object']):
            anno = {}

            # Store bbox
            bbox = obj['bndbox']
            anno['bbox'] = [
                float(bbox['xmin']),
                float(bbox['ymin']),
                float(bbox['xmax']),
                float(bbox['ymax']),
            ]
            anno['bbox_mode'] = 0
            anno['category_id'] = obj_to_id[obj['name']]

            # Store attributes
            anno['attribute_ids'] = []
            if 'attribute' in obj:
                attr = obj['attribute']
                if isinstance(attr, str):
                    attr = [attr]
                anno['attribute_ids'].extend([attr_to_id[a] for a in attr])

            obj_id_to_idx[obj['object_id']] = i
            out['annotations'].append(anno)

        # Store relations
        relations = data['relation']
        if isinstance(relations, dict):
            relations = [relations]

        for rel in relations:
            s_idx = obj_id_to_idx[rel['subject_id']]
            o_idx = obj_id_to_idx[rel['object_id']]
            rel_id = rel_to_id[rel['predicate']]
            out['relations'].append([s_idx, o_idx, rel_id])

        output_dicts.append(out)

    # Save data to a JSON file
    data_save_path = output_dir / 'data.json'
    print(f'Detectron2 format saved to {data_save_path}')
    save_json(output_dicts, data_save_path)


def process_coco_panoptic_to_detectron(
    train_json_path: Path,
    val_json_path: Path,
    panoptic_img_train_dir: Path,
    panoptic_img_val_dir: Path,
    output_dir: Path,
):
    train_data = load_json(train_json_path)
    val_data = load_json(val_json_path)

    # Extract COCO Thing / Stuff Categories ################################
    cats_dict = train_data['categories']
    old_id_to_cat_data = {cat['id']: cat for cat in cats_dict}

    thing_cats = sorted(
        [cat['name'] for cat in cats_dict if cat['isthing']])
    stuff_cats = sorted(
        [cat['name'] for cat in cats_dict if not cat['isthing']])

    thing_cats_save_path = output_dir / 'thing_categories.json'
    save_json(thing_cats, thing_cats_save_path)
    print(f'Thing categories saved to {thing_cats_save_path}')

    stuff_cats_save_path = output_dir / 'stuff_categories.json'
    save_json(stuff_cats, stuff_cats_save_path)
    print(f'Stuff categories saved to {stuff_cats_save_path}')

    # Mapping from old_id -> new_id
    new_thing_cat_id_map = {
        cat['id']: thing_cats.index(cat['name'])
        for cat in cats_dict if cat['isthing']
    }
    new_stuff_cat_id_map = {
        cat['id']: stuff_cats.index(cat['name'])
        for cat in cats_dict if not cat['isthing']
    }

    # Process to Detectron2 Format #########################################
    for name, data, img_dir, panoptic_dir in zip(
            ['train_data', 'val_data'],
            [train_data, val_data],
            ['train2017', 'val2017'],
            [panoptic_img_train_dir, panoptic_img_val_dir],
    ):
        print(f'Processing {name}...')

        # Mapping from image_id -> image_data
        img_id_to_img_data = {}
        for img in data['images']:
            img_id_to_img_data[img['id']] = img

        output_dicts = []

        for anno in tqdm(data['annotations']):
            out = {}

            img = img_id_to_img_data[anno['image_id']]
            out['file_name'] = img_dir + '/' + img['file_name']
            out['height'] = img['height']
            out['width'] = img['width']
            out['image_id'] = str(anno['image_id'])
            out['pan_seg_file_name'] = str(panoptic_dir / anno['file_name'])

            out['segments_info'] = []
            for segment in anno['segments_info']:
                isthing =
old_id_to_cat_data[segment['category_id']]['isthing'] id_map = new_thing_cat_id_map if isthing else new_stuff_cat_id_map category_id = id_map[segment['category_id']] out['segments_info'].append({ 'id': segment['id'], 'category_id': category_id, 'iscrowd': segment['iscrowd'], 'isthing': isthing, }) output_dicts.append(out) # Save data to a JSON file data_save_path = output_dir / f'{name}.json' print(f'{name} in Detectron2 format saved to {data_save_path}') save_json(output_dicts, data_save_path) def process_gqa_to_detectron( train_scene_graphs_path: Path, val_scene_graphs_path: Path, output_dir: Path, ): train_data = load_json(train_scene_graphs_path) val_data = load_json(val_scene_graphs_path) print(f'{len(train_data)} train images') print(f'{len(val_data)} val images') # Extract GQA Categories ################################################### obj_cats = set() attr_cats = set() rel_cats = set() # Iterate through train data print('Extracting categories from train data...') for img_id, img in tqdm(train_data.items()): for obj_id, obj in img['objects'].items(): obj_cats.add(obj['name']) attr_cats.update(obj['attributes']) for rel in obj['relations']: rel_cats.add(rel['name']) # Iterate through val data print('Extracting categories from val data...') for img_id, img in tqdm(val_data.items()): for obj_id, obj in img['objects'].items(): obj_cats.add(obj['name']) attr_cats.update(obj['attributes']) for rel in obj['relations']: rel_cats.add(rel['name']) obj_cats = sorted(list(obj_cats)) attr_cats = sorted(list(attr_cats)) rel_cats = sorted(list(rel_cats)) print(f'{len(obj_cats)} Object Categories') print(f'{len(attr_cats)} Attribute Categories') print(f'{len(rel_cats)} Relation Categories') # Save categories to JSON file obj_cats_save_path = output_dir / 'object_categories.json' save_json(obj_cats, obj_cats_save_path) print(f'Object categories saved to {obj_cats_save_path}') attr_cats_save_path = output_dir / 'attribute_categories.json' save_json(attr_cats, attr_cats_save_path) print(f'Attribute categories saved to {attr_cats_save_path}') rel_cats_save_path = output_dir / 'relation_categories.json' save_json(rel_cats, rel_cats_save_path) print(f'Relation categories saved to {rel_cats_save_path}') obj_to_id = {obj: i for i, obj in enumerate(obj_cats)} attr_to_id = {attr: i for i, attr in enumerate(attr_cats)} rel_to_id = {rel: i for i, rel in enumerate(rel_cats)} # Process to Detectron2 Format ############################################# # Process both train and val data for name, data in zip(['train_data', 'val_data'], [train_data, val_data]): print(f'Processing {name}...') output_dicts = [] for img_id, img in tqdm(data.items()): # Processed dict for each image out = {} out['file_name'] = f'{img_id}.jpg' out['height'] = img['height'] out['width'] = img['width'] out['image_id'] = str(img_id) # Auxiliary information out['location'] = img['location'] if 'location' in img else '' out['weather'] = img['weather'] if 'weather' in img else '' out['annotations'] = [] out['relations'] = [] # Keep an obj_id to idx mapping obj_id_to_idx = {} for i, (obj_id, obj) in enumerate(img['objects'].items()): anno = {} # Store bbox anno['bbox'] = [obj['x'], obj['y'], obj['w'], obj['h']] anno['bbox_mode'] = 1 anno['category_id'] = obj_to_id[obj['name']] # Store attributes anno['attribute_ids'] = [ attr_to_id[attr_name] for attr_name in obj['attributes'] ] obj_id_to_idx[obj_id] = i out['annotations'].append(anno) # Store relations for rel in obj['relations']: out['relations'].append( [obj_id, rel['object'], 
rel_to_id[rel['name']]]) # Convert obj_ids to idx for rel in out['relations']: rel[0] = obj_id_to_idx[rel[0]] rel[1] = obj_id_to_idx[rel[1]] output_dicts.append(out) # Save data to a JSON file data_save_path = output_dir / f'{name}.json' print(f'{name} in Detectron2 format saved to {data_save_path}') save_json(output_dicts, data_save_path)
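
# Illustrative sanity check for the box-format helpers above (not part of the
# original module; the values are made up for demonstration):
if __name__ == '__main__':
    # [xc, yc, w, h] = [50, 50, 20, 10] -> [x1, y1, x2, y2] = [40, 45, 60, 55]
    assert xcycwh_to_xyxy([50, 50, 20, 10]) == [40, 45, 60, 55]
    # The round trip back to center format recovers the input
    assert xyxy_to_xcycwh([40, 45, 60, 55]) == [50, 50, 20, 10]

    # segment_to_bbox returns the tight box around the True region:
    # rows 2..4 and cols 3..6 -> [x1, y1, x2, y2] = [3, 2, 6, 4]
    mask = np.zeros((10, 10), dtype=bool)
    mask[2:5, 3:7] = True
    assert segment_to_bbox(mask) == [3, 2, 6, 4]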
20,965
31.00916
82
py
OpenPSG
OpenPSG-main/openpsg/utils/vis_tools/postprocess.py
from collections import Counter, defaultdict
from pathlib import Path

import cv2
import h5py
import numpy as np
from detectron2.data import DatasetCatalog
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ScaleTransform
from detectron2.structures import BitMasks, Boxes, pairwise_ioa, pairwise_iou
from panopticapi.utils import rgb2id
from tqdm import tqdm

from .datasets import (init_coco_panoptic_dataset, init_gqa_dataset,
                       init_vg_dataset, init_vrr_vg_dataset)
from .preprocess import (load_json, resize_bbox, save_json, segment_to_bbox,
                         x1y1wh_to_xyxy, xyxy_to_xcycwh)


def process_gqa_and_coco(
    gqa_img_dir: Path,
    gqa_img_rs_dir: Path,
    output_dir: Path,
    vg_id_to_coco_id_path: Path = Path('data/vg/vg_id_to_coco_id.json'),
):
    init_coco_panoptic_dataset()
    init_gqa_dataset()

    # Get and combine datasets
    coco_train_dataset = DatasetCatalog.get('coco_train')
    coco_val_dataset = DatasetCatalog.get('coco_val')
    coco_dataset = coco_train_dataset + coco_val_dataset

    gqa_train_dataset = DatasetCatalog.get('gqa_train')
    gqa_val_dataset = DatasetCatalog.get('gqa_val')
    gqa_dataset = gqa_train_dataset + gqa_val_dataset

    # Check GQA overlap with COCO
    vg_id_to_coco_id = load_json(vg_id_to_coco_id_path)
    vg_coco_ids = set(vg_id_to_coco_id.keys())
    gqa_ids = set(d['image_id'] for d in gqa_dataset)
    vg_overlap_ids = vg_coco_ids & gqa_ids

    # Merge GQA and COCO
    id_to_coco_data = {d['image_id']: d for d in coco_dataset}
    merged_dataset = []

    for gqa_d in tqdm(gqa_dataset):
        vg_id = gqa_d['image_id']
        if vg_id in vg_id_to_coco_id:
            coco_id = vg_id_to_coco_id[vg_id]
            merged_dataset.append((gqa_d, id_to_coco_data[coco_id]))

    # Resize GQA images to COCO dimensions
    for gqa_d, coco_d in tqdm(merged_dataset):
        # Resize image
        img = cv2.imread(str(gqa_img_dir / gqa_d['file_name']))
        img_resized = cv2.resize(
            img,
            (coco_d['width'], coco_d['height']),
        )
        cv2.imwrite(str(gqa_img_rs_dir / gqa_d['file_name']), img_resized)

        # Resize bboxes
        for anno in gqa_d['annotations']:
            transform = ScaleTransform(
                gqa_d['height'],
                gqa_d['width'],
                coco_d['height'],
                coco_d['width'],
            )
            bbox = x1y1wh_to_xyxy(anno['bbox'])
            bbox_resized = transform.apply_box(np.array(bbox))[0].tolist()

            anno['bbox'] = bbox_resized
            anno['bbox_mode'] = 0

        gqa_d['height'] = coco_d['height']
        gqa_d['width'] = coco_d['width']

        # Add bbox info for panoptic coco
        seg_map = read_image(coco_d['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)

        for s in coco_d['segments_info']:
            curr_seg = seg_map == s['id']
            # [x1, y1, x2, y2]
            s['bbox'] = segment_to_bbox(curr_seg)

    print(f'Resized images saved to {gqa_img_rs_dir}')

    # Save merged and processed dataset
    save_path = output_dir / 'data.json'
    print(f'Merged dataset saved to {save_path}')
    save_json(merged_dataset, save_path)


def process_vrr_and_coco(
    vrr_img_dir: Path,
    vrr_img_rs_dir: Path,
    output_path: Path,
    vg_id_to_coco_id_path: Path = Path('data/vg/vg_id_to_coco_id.json'),
):
    init_coco_panoptic_dataset()
    init_vrr_vg_dataset()

    # Get and combine datasets
    coco_train_dataset = DatasetCatalog.get('coco_train')
    coco_val_dataset = DatasetCatalog.get('coco_val')
    coco_dataset = coco_train_dataset + coco_val_dataset

    vrr_dataset = DatasetCatalog.get('vrr_vg')

    # Check VRR-VG overlap with COCO
    vg_id_to_coco_id = load_json(vg_id_to_coco_id_path)
    vg_coco_ids = set(vg_id_to_coco_id.keys())
    vrr_ids = set(d['image_id'] for d in vrr_dataset)
    vg_overlap_ids = vg_coco_ids & vrr_ids

    # Merge VRR-VG and COCO
    id_to_coco_data = {d['image_id']: d for d in coco_dataset}
    merged_dataset = []

    for vrr_d in tqdm(vrr_dataset):
        vg_id = vrr_d['image_id']
        if vg_id in vg_id_to_coco_id:
            coco_id = vg_id_to_coco_id[vg_id]
            merged_dataset.append((vrr_d, id_to_coco_data[coco_id]))

    # Resize VRR-VG images to COCO dimensions
    for vrr_d, coco_d in tqdm(merged_dataset):
        # Resize image
        img = cv2.imread(str(vrr_img_dir / vrr_d['file_name']))
        img_resized = cv2.resize(
            img,
            (coco_d['width'], coco_d['height']),
        )
        cv2.imwrite(str(vrr_img_rs_dir / vrr_d['file_name']), img_resized)

        # Resize bboxes
        for anno in vrr_d['annotations']:
            transform = ScaleTransform(
                vrr_d['height'],
                vrr_d['width'],
                coco_d['height'],
                coco_d['width'],
            )
            # bbox = x1y1wh_to_xyxy(anno["bbox"])
            bbox = anno['bbox']
            bbox_resized = transform.apply_box(np.array(bbox))[0].tolist()

            anno['bbox'] = bbox_resized
            anno['bbox_mode'] = 0

        vrr_d['height'] = coco_d['height']
        vrr_d['width'] = coco_d['width']

        # Add bbox info for panoptic coco
        seg_map = read_image(coco_d['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)

        for s in coco_d['segments_info']:
            curr_seg = seg_map == s['id']
            # [x1, y1, x2, y2]
            s['bbox'] = segment_to_bbox(curr_seg)

    print(f'Resized images saved to {vrr_img_rs_dir}')

    # Save merged and processed dataset
    save_path = output_path
    print(f'Merged dataset saved to {save_path}')
    save_json(merged_dataset, save_path)


def process_vg_and_coco(
    vg_img_dir: Path,
    vg_img_rs_dir: Path,
    output_path: Path,
    vg_id_to_coco_id_path: Path = Path('data/vg/vg_id_to_coco_id.json'),
):
    init_coco_panoptic_dataset()
    init_vg_dataset()

    # Get and combine datasets
    coco_train_dataset = DatasetCatalog.get('coco_train')
    coco_val_dataset = DatasetCatalog.get('coco_val')
    coco_dataset = coco_train_dataset + coco_val_dataset

    vg_train_dataset = DatasetCatalog.get('vg_train')
    vg_val_dataset = DatasetCatalog.get('vg_val')
    vg_dataset = vg_train_dataset + vg_val_dataset

    # Check VG overlap with COCO
    vg_id_to_coco_id = load_json(vg_id_to_coco_id_path)
    vg_coco_ids = set(vg_id_to_coco_id.keys())
    vg_ids = set(d['image_id'] for d in vg_dataset)
    vg_overlap_ids = vg_coco_ids & vg_ids

    # Merge VG and COCO
    id_to_coco_data = {d['image_id']: d for d in coco_dataset}
    merged_dataset = []

    for vg_d in tqdm(vg_dataset):
        vg_id = vg_d['image_id']
        if vg_id in vg_id_to_coco_id:
            coco_id = vg_id_to_coco_id[vg_id]
            merged_dataset.append((vg_d, id_to_coco_data[coco_id]))

    # Resize VG annotations to COCO dimensions
    for vg_d, coco_d in tqdm(merged_dataset):
        # NOTE Resize image
        # img = cv2.imread(str(vg_img_dir / vg_d["file_name"]))
        # img_resized = cv2.resize(
        #     img,
        #     (coco_d["width"], coco_d["height"]),
        # )
        # cv2.imwrite(str(vg_img_rs_dir / vg_d["file_name"]), img_resized)

        # Resize bboxes
        for anno in vg_d['annotations']:
            transform = ScaleTransform(
                vg_d['height'],
                vg_d['width'],
                coco_d['height'],
                coco_d['width'],
            )
            # bbox = x1y1wh_to_xyxy(anno["bbox"])
            bbox = anno['bbox']
            bbox_resized = transform.apply_box(np.array(bbox))[0].tolist()

            anno['bbox'] = bbox_resized
            anno['bbox_mode'] = 0

        vg_d['height'] = coco_d['height']
        vg_d['width'] = coco_d['width']

        # Add bbox info for panoptic coco
        seg_map = read_image(coco_d['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)

        for s in coco_d['segments_info']:
            curr_seg = seg_map == s['id']
            # [x1, y1, x2, y2]
            s['bbox'] = segment_to_bbox(curr_seg)

    # print(f"Resized images saved to {vrr_img_rs_dir}")

    # Save merged and processed dataset
    save_path = output_path
    print(f'Merged dataset saved to {save_path}')
    save_json(merged_dataset, save_path)


def compute_gqa_coco_overlap(output_path: Path):
    dataset = load_json(Path('data/psg/data.json'))

    gqa_obj_cats = load_json(Path('data/gqa/detectron/object_categories.json'))
    coco_thing_cats = load_json(
        Path('data/coco/detectron/thing_categories.json'))
    coco_stuff_cats = load_json(
        Path('data/coco/detectron/stuff_categories.json'))

    # For each GQA class, what is the average proportion of COCO classes
    # within its bounding box?
    # gqa_obj_id -> coco_obj_id -> prop
    out = defaultdict(lambda: defaultdict(float))
    # Counts the number of instances for each GQA class
    # gqa_id -> num_instances
    out_counts = defaultdict(int)

    for gqa_d, coco_d in tqdm(dataset):
        # Get seg_id to obj_id map
        seg_id_to_obj_id = {}
        for s in coco_d['segments_info']:
            obj_id = s['category_id']
            # Differentiate between thing and stuff classes
            if not s['isthing']:
                obj_id += 100
            seg_id_to_obj_id[s['id']] = obj_id

        # Load segment
        seg_map = read_image(coco_d['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)
        # Convert invalid pixels to -1
        seg_map[seg_map == 0] = -1
        # Convert to object ids
        for seg_id, obj_id in seg_id_to_obj_id.items():
            seg_map[seg_map == seg_id] = obj_id

        # Compute overlap for each bounding box
        for anno in gqa_d['annotations']:
            bbox = [int(c) for c in anno['bbox']]
            x1, y1, x2, y2 = bbox

            # Get region in segmentation map
            seg_bbox = seg_map[y1:y2, x1:x2]

            # Get id proportion
            unique, counts = np.unique(seg_bbox, return_counts=True)
            prop = counts / counts.sum()

            gqa_obj_id = anno['category_id']

            for coco_obj_id, p in zip(unique.tolist(), prop.tolist()):
                out[gqa_obj_id][coco_obj_id] += p

            # Update instance count of each GQA class
            out_counts[gqa_obj_id] += 1

    new_out = {}
    # Process ids to class names
    for gqa_id, props in tqdm(out.items()):
        gqa_name = gqa_obj_cats[gqa_id]

        new_props = {}
        for coco_id, p in props.items():
            if coco_id == -1:
                coco_name = 'NA'
            elif coco_id >= 100:
                coco_name = coco_stuff_cats[coco_id - 100]
            else:
                coco_name = coco_thing_cats[coco_id]

            # Normalize proportions
            new_props[coco_name] = p / out_counts[gqa_id]

        new_out[gqa_name] = new_props

    # Save final output
    save_json(new_out, output_path)


def compute_gqa_coco_overlap_norm(output_path: Path):
    """Given a GQA class and a COCO class, what is the average IoA of the
    COCO segment with the GQA bbox?"""
    dataset = load_json(Path('data/psg/data.json'))

    gqa_obj_cats = load_json(Path('data/gqa/detectron/object_categories.json'))
    coco_thing_cats = load_json(
        Path('data/coco/detectron/thing_categories.json'))
    coco_stuff_cats = load_json(
        Path('data/coco/detectron/stuff_categories.json'))

    # For each GQA class, what is the average proportion of COCO classes
    # within its bounding box?
    # gqa_obj_id -> coco_obj_id -> prop
    out = defaultdict(lambda: defaultdict(float))
    # NOTE Different normalizing scheme
    # Counts the number of instances for each GQA class for each COCO class
    # gqa_obj_id -> coco_obj_id -> num_instances
    out_counts = defaultdict(lambda: defaultdict(int))

    for gqa_d, coco_d in tqdm(dataset):
        # Get seg_id to obj_id map
        seg_id_to_obj_id = {}
        for s in coco_d['segments_info']:
            obj_id = s['category_id']
            # Differentiate between thing and stuff classes
            if not s['isthing']:
                obj_id += 100
            seg_id_to_obj_id[s['id']] = obj_id

        # Load segment
        seg_map = read_image(coco_d['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)
        # Convert invalid pixels to -1
        seg_map[seg_map == 0] = -1
        # Convert to object ids
        for seg_id, obj_id in seg_id_to_obj_id.items():
            seg_map[seg_map == seg_id] = obj_id

        # Compute overlap for each bounding box
        for anno in gqa_d['annotations']:
            bbox = [int(c) for c in anno['bbox']]
            x1, y1, x2, y2 = bbox

            # Get region in segmentation map
            seg_bbox = seg_map[y1:y2, x1:x2]

            # Get id proportion
            unique, counts = np.unique(seg_bbox, return_counts=True)
            prop = counts / counts.sum()

            gqa_obj_id = anno['category_id']

            for coco_obj_id, p in zip(unique.tolist(), prop.tolist()):
                out[gqa_obj_id][coco_obj_id] += p
                # Update instance count of each GQA class
                out_counts[gqa_obj_id][coco_obj_id] += 1

    # FIXME Sort by score
    new_out = {}
    # Process ids to class names
    for gqa_id, props in tqdm(out.items()):
        gqa_name = gqa_obj_cats[gqa_id]

        new_props = {}
        for coco_id, p in props.items():
            if coco_id == -1:
                coco_name = 'NA'
            elif coco_id >= 100:
                coco_name = coco_stuff_cats[coco_id - 100]
            else:
                coco_name = coco_thing_cats[coco_id]

            # Normalize proportions
            new_props[coco_name] = p / out_counts[gqa_id][coco_id]

        new_out[gqa_name] = new_props

    # Save final output
    save_json(new_out, output_path)


def compute_coco_gqa_overlap(
    output_path: Path,
    method: str = 'iou',  # One of {iou, ioa}
):
    """For each COCO class, compute the average IoU (or IoA) of its bbox
    with the bboxes of each GQA class appearing in the same image."""
    dataset = load_json(Path('data/psg/data.json'))

    gqa_obj_cats = load_json(Path('data/gqa/detectron/object_categories.json'))
    coco_thing_cats = load_json(
        Path('data/coco/detectron/thing_categories.json'))
    coco_stuff_cats = load_json(
        Path('data/coco/detectron/stuff_categories.json'))

    # coco_obj_id -> gqa_obj_id -> avg_iou
    out = defaultdict(lambda: defaultdict(float))
    # coco_obj_id -> n_instances
    out_counts = defaultdict(int)

    for gqa_d, coco_d in tqdm(dataset):
        # Get gqa cats and bboxes
        gqa_annos = [(anno['category_id'], anno['bbox'])
                     for anno in gqa_d['annotations']]
        # NOTE What if no annotations
        if gqa_annos == []:
            continue
        gqa_cats, gqa_bboxes = zip(*gqa_annos)
        gqa_bboxes = Boxes(gqa_bboxes)

        # Get coco cats and bboxes
        coco_cats = []
        # Load segment
        seg_map = read_image(coco_d['pan_seg_file_name'], format='RGB')
        # Convert to segment ids
        seg_map = rgb2id(seg_map)
        # Convert to bitmasks
        bit_masks = np.zeros(
            (len(coco_d['segments_info']), coco_d['height'], coco_d['width']))

        for i, s in enumerate(coco_d['segments_info']):
            if s['isthing']:
                coco_cats.append(s['category_id'])
            else:
                coco_cats.append(s['category_id'] + 80)
            bit_masks[i] = seg_map == s['id']

        bit_masks = BitMasks(bit_masks)
        coco_bboxes = bit_masks.get_bounding_boxes()

        # Compute pairwise IoU / IoA
        # NOTE Change compute function here
        if method == 'iou':
            iou_matrix = pairwise_iou(gqa_bboxes, coco_bboxes)
        elif method == 'ioa':
            iou_matrix = pairwise_ioa(gqa_bboxes, coco_bboxes)

        n_gqa, n_coco = iou_matrix.shape

        # For each coco instance
        for i, coco_id in enumerate(coco_cats):
            # IoU of each gqa box with current coco instance
            ious = iou_matrix[:, i].tolist()
            for gqa_id, iou in zip(gqa_cats, ious):
                out[coco_id][gqa_id] += iou
            out_counts[coco_id] += 1

    # Process ids to class names
    new_out = {}
    for coco_id, ious in tqdm(out.items()):
        if coco_id >= 80:
            coco_name = coco_stuff_cats[coco_id - 80]
        else:
            coco_name = coco_thing_cats[coco_id]

        new_ious = {}
        for gqa_id, iou in ious.items():
            gqa_name = gqa_obj_cats[gqa_id]
            # Normalize proportions
            new_ious[gqa_name] = iou / out_counts[coco_id]

        new_out[coco_name] = new_ious

    # Save final output
    save_json(new_out, output_path)


def psg_to_kaihua(
    dataset_path: Path,
    thing_cats_path: Path,
    stuff_cats_path: Path,
    pred_cats_path: Path,
    output_dir: Path,
):
    dataset = load_json(dataset_path)
    pred_cats = load_json(pred_cats_path)

    thing_cats = load_json(thing_cats_path)
    stuff_cats = load_json(stuff_cats_path)
    obj_cats = thing_cats + stuff_cats

    # Generate metadata dicts
    idx_to_label = {str(i + 1): c for i, c in enumerate(obj_cats)}
    label_to_idx = {v: int(k) for k, v in idx_to_label.items()}
    idx_to_predicate = {str(i + 1): c for i, c in enumerate(pred_cats)}
    predicate_to_idx = {v: int(k) for k, v in idx_to_predicate.items()}

    all_predicates = []
    for d in dataset:
        rel_names = [pred_cats[r[2]] for r in d['relations']]
        all_predicates.extend(rel_names)
    predicate_count = dict(Counter(all_predicates))

    save_json(
        {
            'label_to_idx': label_to_idx,
            'idx_to_label': idx_to_label,
            'predicate_to_idx': predicate_to_idx,
            'idx_to_predicate': idx_to_predicate,
            'predicate_count': predicate_count,
            'attribute_count': {},
            'idx_to_attribute': {},
            'attribute_to_idx': {},
        },
        output_dir / 'PSG-dicts.json',
    )

    # Generate image metadata
    image_data = []
    for d in dataset:
        image_data.append({
            'file_name': d['file_name'],
            'image_id': d['vg_image_id'],
            'height': d['height'],
            'width': d['width'],
        })
    save_json(image_data, output_dir / 'image_data.json')

    # Generate hdf5 dataset
    n_objs = sum([len(d['segments_info']) for d in dataset])

    attributes = np.zeros((n_objs, 10))
    boxes_1024 = []
    boxes_512 = []
    img_to_first_box = []
    img_to_last_box = []
    labels = []
    predicates = []
    relationships = []
    img_to_first_rel = []
    img_to_last_rel = []

    box_idx = 0
    rel_idx = 0

    for d in tqdm(dataset):
        old_height = d['height']
        old_width = d['width']

        for r in d['relations']:
            s_i, o_i, pred_id = r
            predicates.append(pred_id + 1)
            relationships.append([box_idx + s_i, box_idx + o_i])

        img_to_first_rel.append(rel_idx)
        rel_idx += len(d['relations'])
        img_to_last_rel.append(rel_idx - 1)

        for s in d['segments_info']:
            # [x1, y1, x2, y2]
            # Compute new height, width
            boxes_1024.append(
                xyxy_to_xcycwh(
                    resize_bbox(old_height, old_width, s['bbox'], 1024)))
            boxes_512.append(
                xyxy_to_xcycwh(
                    resize_bbox(old_height, old_width, s['bbox'], 512)))
            labels.append(s['category_id'] +
                          1 if s['isthing'] else s['category_id'] + 81)

        img_to_first_box.append(box_idx)
        box_idx += len(d['segments_info'])
        img_to_last_box.append(box_idx - 1)

    boxes_1024 = np.array(boxes_1024)
    boxes_512 = np.array(boxes_512)
    img_to_first_box = np.array(img_to_first_box)
    img_to_last_box = np.array(img_to_last_box)
    labels = np.array(labels)
    predicates = np.array(predicates)
    relationships = np.array(relationships)
    img_to_first_rel = np.array(img_to_first_rel)
    img_to_last_rel = np.array(img_to_last_rel)

    labels = labels[..., None]
    predicates = predicates[..., None]

    # Get val indices
    coco_val_img_dir = Path('data/coco/val2017')
    coco_val_ids = set(
        [p.stem.lstrip('0') for p in coco_val_img_dir.glob('*.jpg')])
    split = [2 if d['image_id'] in coco_val_ids else 0 for d in dataset]
    split = np.array(split)

    # Save hdf5
    f = h5py.File(output_dir / 'PSG.h5', 'w')
    f.create_dataset('attributes', data=attributes, dtype='i8')
    f.create_dataset('boxes_1024', data=boxes_1024, dtype='i4')
    f.create_dataset('boxes_512', data=boxes_512, dtype='i4')
    f.create_dataset('img_to_first_box', data=img_to_first_box, dtype='i4')
    f.create_dataset('img_to_last_box', data=img_to_last_box, dtype='i4')
    f.create_dataset('img_to_first_rel', data=img_to_first_rel, dtype='i4')
    f.create_dataset('img_to_last_rel', data=img_to_last_rel, dtype='i4')
    f.create_dataset('labels', data=labels, dtype='i4')
    f.create_dataset('predicates', data=predicates, dtype='i4')
    f.create_dataset('relationships', data=relationships, dtype='i4')
    f.create_dataset('split', data=split, dtype='i4')
    f.close()
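
# Illustrative invocation of the converter above. All paths here are
# assumptions for demonstration only, not defaults shipped with the module:
if __name__ == '__main__':
    psg_to_kaihua(
        dataset_path=Path('data/psg/psg.json'),
        thing_cats_path=Path('data/coco/detectron/thing_categories.json'),
        stuff_cats_path=Path('data/coco/detectron/stuff_categories.json'),
        pred_cats_path=Path('data/psg/relation_categories.json'),
        output_dir=Path('data/psg/kaihua'),
    )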
21,934
30.789855
79
py
OpenPSG
OpenPSG-main/configs/gpsnet/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
_base_ = [
    '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
]

model = dict(
    relation_head=dict(
        type='GPSHead',
        head_config=dict(
            # NOTE: Evaluation type
            use_gt_box=False,
            use_gt_label=False,
        ),
    ),
    roi_head=dict(bbox_head=dict(type='SceneGraphBBoxHead'), ),
)

evaluation = dict(
    interval=1,
    metric='sgdet',
    relation_mode=True,
    classwise=True,
    iou_thrs=0.5,
    detection_method='pan_seg',
)

data = dict(samples_per_gpu=16)

# Log config
project_name = 'openpsg'
expt_name = 'gpsnet_panoptic_fpn_r50_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        ),
    ],
)
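
# Illustrative usage: configs like this one are consumed by mmdetection-style
# entry points. The script names below are assumptions in the usual layout,
# not verified against this snapshot:
#   python tools/train.py configs/gpsnet/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
#   python tools/test.py configs/gpsnet/panoptic_fpn_r50_fpn_1x_sgdet_psg.py \
#       work_dirs/gpsnet_panoptic_fpn_r50_fpn_1x_sgdet_psg/latest.pth --eval sgdet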
928
19.195652
63
py
OpenPSG
OpenPSG-main/configs/gpsnet/panoptic_fpn_r101_fpn_1x_sgdet_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_sgdet_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained',
                  checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'gpsnet_panoptic_fpn_r101_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
671
23.888889
94
py
OpenPSG
OpenPSG-main/configs/gpsnet/panoptic_fpn_r50_fpn_1x_predcls_psg.py
_base_ = [
    '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
]

model = dict(relation_head=dict(
    type='GPSHead',
    head_config=dict(
        # NOTE: Evaluation type
        use_gt_box=True,
        use_gt_label=True,
    ),
))

evaluation = dict(interval=1,
                  metric='predcls',
                  relation_mode=True,
                  classwise=True,
                  detection_method='pan_seg')

# Change batch size and learning rate
data = dict(samples_per_gpu=16, workers_per_gpu=0)
optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=0.0001)

# Log config
project_name = 'openpsg'
expt_name = 'gpsnet_panoptic_fpn_r50_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        ),
    ],
)
992
22.642857
72
py
OpenPSG
OpenPSG-main/configs/gpsnet/panoptic_fpn_r101_fpn_1x_predcls_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_predcls_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained',
                  checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'gpsnet_panoptic_fpn_r101_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
675
24.037037
94
py
OpenPSG
OpenPSG-main/configs/imp/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
_base_ = [
    '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
]

model = dict(relation_head=dict(
    type='IMPHead',
    head_config=dict(
        # NOTE: Evaluation type
        use_gt_box=False,
        use_gt_label=False,
        num_iter=2,
    ),
))

evaluation = dict(
    interval=1,
    metric='sgdet',
    relation_mode=True,
    classwise=True,
    iou_thrs=0.5,
    detection_method='pan_seg',
)

# Change batch size and learning rate
data = dict(samples_per_gpu=16, )
# workers_per_gpu=0)  # FIXME: Is this the problem?
optimizer = dict(type='SGD', lr=0.001, momentum=0.9)

# Log config
project_name = 'openpsg'
expt_name = 'imp_panoptic_fpn_r50_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)
1,084
21.142857
55
py
OpenPSG
OpenPSG-main/configs/imp/panoptic_fpn_r101_fpn_1x_sgdet_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_sgdet_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained',
                  checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'imp_panoptic_fpn_r101_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
668
23.777778
94
py
OpenPSG
OpenPSG-main/configs/imp/panoptic_fpn_r50_fpn_1x_predcls_psg.py
_base_ = [
    '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
]

model = dict(relation_head=dict(
    type='IMPHead',
    head_config=dict(
        # NOTE: Evaluation type
        use_gt_box=True,
        use_gt_label=True,
        num_iter=2,
    ),
))

evaluation = dict(interval=1,
                  metric='predcls',
                  relation_mode=True,
                  classwise=True)

# Change batch size and learning rate
data = dict(samples_per_gpu=16, )
# workers_per_gpu=0)  # FIXME: Is this the problem?
optimizer = dict(type='SGD', lr=0.001, momentum=0.9)

# Log config
project_name = 'openpsg'
expt_name = 'imp_panoptic_fpn_r50_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)
1,071
22.822222
55
py
OpenPSG
OpenPSG-main/configs/imp/panoptic_fpn_r101_fpn_1x_predcls_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_predcls_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained',
                  checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'imp_panoptic_fpn_r101_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
765
25.413793
94
py
OpenPSG
OpenPSG-main/configs/vctree/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
_base_ = [
    '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
]

model = dict(
    relation_head=dict(
        type='VCTreeHead',
        head_config=dict(
            # NOTE: Evaluation type
            use_gt_box=False,
            use_gt_label=False,
        ),
    ),
    roi_head=dict(bbox_head=dict(type='SceneGraphBBoxHead'), ),
)

evaluation = dict(interval=1,
                  metric='sgdet',
                  relation_mode=True,
                  classwise=True,
                  iou_thrs=0.5,
                  detection_method='pan_seg')

# Change batch size and learning rate
data = dict(samples_per_gpu=16,
            # workers_per_gpu=2
            )
# optimizer = dict(lr=0.003)

# Log config
project_name = 'openpsg'
expt_name = 'vctree_panoptic_fpn_r50_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)
1,200
23.02
63
py
OpenPSG
OpenPSG-main/configs/vctree/panoptic_fpn_r101_fpn_1x_sgdet_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_sgdet_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained',
                  checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'vctree_panoptic_fpn_r101_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
764
25.37931
94
py
OpenPSG
OpenPSG-main/configs/vctree/panoptic_fpn_r50_fpn_1x_predcls_psg.py
_base_ = [
    '../motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py',
]

model = dict(relation_head=dict(
    type='VCTreeHead',
    head_config=dict(
        # NOTE: Evaluation type
        use_gt_box=True,
        use_gt_label=True,
    ),
))

evaluation = dict(interval=1,
                  metric='predcls',
                  relation_mode=True,
                  classwise=True)

# Change batch size and learning rate
data = dict(samples_per_gpu=16,
            workers_per_gpu=0)  # FIXME: Is this the problem?
# optimizer = dict(lr=0.001)

# Log config
project_name = 'openpsg'
expt_name = 'vctree_panoptic_fpn_r50_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)
1,041
22.681818
61
py
OpenPSG
OpenPSG-main/configs/vctree/panoptic_fpn_r101_fpn_1x_predcls_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_predcls_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained',
                  checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'vctree_panoptic_fpn_r101_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
768
25.517241
94
py
OpenPSG
OpenPSG-main/configs/psgtr/psgtr_r50.py
model = dict(
    type='PSGTr',
    backbone=dict(type='ResNet',
                  depth=50,
                  num_stages=4,
                  out_indices=(0, 1, 2, 3),
                  frozen_stages=1,
                  norm_cfg=dict(type='BN', requires_grad=False),
                  norm_eval=True,
                  style='pytorch',
                  init_cfg=dict(type='Pretrained',
                                checkpoint='torchvision://resnet50')),
    bbox_head=dict(type='PSGTrHead',
                   num_classes=80,
                   num_relations=117,
                   in_channels=2048,
                   transformer=dict(
                       type='Transformer',
                       encoder=dict(type='DetrTransformerEncoder',
                                    num_layers=6,
                                    transformerlayers=dict(
                                        type='BaseTransformerLayer',
                                        attn_cfgs=[
                                            dict(type='MultiheadAttention',
                                                 embed_dims=256,
                                                 num_heads=8,
                                                 dropout=0.1)
                                        ],
                                        feedforward_channels=2048,
                                        ffn_dropout=0.1,
                                        operation_order=('self_attn', 'norm',
                                                         'ffn', 'norm'))),
                       decoder=dict(
                           type='DetrTransformerDecoder',
                           return_intermediate=True,
                           num_layers=6,
                           transformerlayers=dict(
                               type='DetrTransformerDecoderLayer',
                               attn_cfgs=dict(type='MultiheadAttention',
                                              embed_dims=256,
                                              num_heads=8,
                                              dropout=0.1),
                               feedforward_channels=2048,
                               ffn_dropout=0.1,
                               operation_order=('self_attn', 'norm',
                                                'cross_attn', 'norm', 'ffn',
                                                'norm')),
                       )),
                   positional_encoding=dict(type='SinePositionalEncoding',
                                            num_feats=128,
                                            normalize=True),
                   sub_loss_cls=dict(type='CrossEntropyLoss',
                                     use_sigmoid=False,
                                     loss_weight=1.0,
                                     class_weight=1.0),
                   sub_loss_bbox=dict(type='L1Loss', loss_weight=5.0),
                   sub_loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                   sub_focal_loss=dict(type='BCEFocalLoss', loss_weight=2.0),
                   sub_dice_loss=dict(type='psgtrDiceLoss', loss_weight=2.0),
                   obj_loss_cls=dict(type='CrossEntropyLoss',
                                     use_sigmoid=False,
                                     loss_weight=1.0,
                                     class_weight=1.0),
                   obj_loss_bbox=dict(type='L1Loss', loss_weight=5.0),
                   obj_loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                   obj_focal_loss=dict(type='BCEFocalLoss', loss_weight=2.0),
                   obj_dice_loss=dict(type='psgtrDiceLoss', loss_weight=2.0),
                   rel_loss_cls=dict(type='CrossEntropyLoss',
                                     use_sigmoid=False,
                                     loss_weight=2.0,
                                     class_weight=1.0)),
    # training and testing settings
    train_cfg=dict(assigner=dict(
        type='HTriMatcher',
        s_cls_cost=dict(type='ClassificationCost', weight=1.),
        s_reg_cost=dict(type='BBoxL1Cost', weight=5.0),
        s_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0),
        o_cls_cost=dict(type='ClassificationCost', weight=1.),
        o_reg_cost=dict(type='BBoxL1Cost', weight=5.0),
        o_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0),
        r_cls_cost=dict(type='ClassificationCost', weight=2.))),
    test_cfg=dict(max_per_img=100))
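
# Note: the HTriMatcher costs above mirror the branch loss weights (cls 1.0,
# L1 5.0, gIoU 2.0 for the subject/object branches; cls 2.0 for relations),
# so the Hungarian assignment scores candidate triplets with roughly the same
# objective the losses then train, following standard DETR practice.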
4,523
53.506024
77
py
OpenPSG
OpenPSG-main/configs/psgtr/psgtr_r50_psg_inference.py
_base_ = [
    './psgtr_r50_psg.py'
]

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)

pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            # NOTE: Do not change the img to DC.
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ],
    ),
]

data = dict(
    test=dict(
        pipeline=pipeline,
    ),
)
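
# 'DC' in the NOTE above refers to mmcv's DataContainer: keeping the image a
# plain tensor via ImageToTensor lets this config be fed raw images at
# inference time. An assumed invocation (script name follows the usual
# mmdetection layout and is not verified against this snapshot):
#   python tools/test.py configs/psgtr/psgtr_r50_psg_inference.py \
#       <checkpoint.pth> --show-dir outputs/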
760
23.548387
53
py
OpenPSG
OpenPSG-main/configs/psgtr/psgtr_r101_psg.py
_base_ = [
    '../_base_/models/psgtr_r101.py', '../_base_/datasets/psg.py',
    '../_base_/custom_runtime.py'
]

custom_imports = dict(imports=[
    'openpsg.models.frameworks.psgtr', 'openpsg.models.losses.seg_losses',
    'openpsg.models.relation_heads.psgtr_head', 'openpsg.datasets',
    'openpsg.datasets.pipelines.loading',
    'openpsg.datasets.pipelines.rel_randomcrop',
    'openpsg.models.relation_heads.approaches.matcher', 'openpsg.utils'
],
                      allow_failed_imports=False)

dataset_type = 'PanopticSceneGraphDataset'

# HACK:
object_classes = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
    'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
    'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
    'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
    'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
    'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
    'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
    'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
    'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
    'food-other-merged', 'building-other-merged', 'rock-merged',
    'wall-other-merged', 'rug-merged'
]

predicate_classes = [
    'over', 'in front of', 'beside', 'on', 'in', 'attached to',
    'hanging from', 'on back of', 'falling off', 'going down', 'painted on',
    'walking on', 'running on', 'crossing', 'standing on', 'lying on',
    'sitting on', 'flying over', 'jumping over', 'jumping from', 'wearing',
    'holding', 'carrying', 'looking at', 'guiding', 'kissing', 'eating',
    'drinking', 'feeding', 'biting', 'catching', 'picking', 'playing with',
    'chasing', 'climbing', 'cleaning', 'playing', 'touching', 'pushing',
    'pulling', 'opening', 'cooking', 'talking to', 'throwing', 'slicing',
    'driving', 'riding', 'parked on', 'driving on', 'about to hit',
    'kicking', 'swinging', 'entering', 'exiting', 'enclosing', 'leaning on',
]

model = dict(bbox_head=dict(
    num_classes=len(object_classes),
    num_relations=len(predicate_classes),
    object_classes=object_classes,
    predicate_classes=predicate_classes,
    use_mask=True,
    num_query=100,
), )

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)

# train_pipeline; NOTE: the img_scale and the Pad's size_divisor are
# different from the default settings in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadPanopticSceneGraphAnnotations',
         with_bbox=True,
         with_rel=True,
         with_mask=True,
         with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True)
            ],
            [
                dict(type='Resize',
                     img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True),
                dict(type='RelRandomCrop',
                     crop_type='absolute_range',
                     crop_size=(384, 600),
                     allow_negative_crop=False),  # no empty relations
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     override=True,
                     keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='RelsFormatBundle'),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_masks'])
]

# test_pipeline; NOTE: the Pad's size_divisor is different from the default
# setting (size_divisor=32), but it has little effect on performance either
# way.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # dict(type='LoadSceneGraphAnnotations', with_bbox=True, with_rel=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=1),
            dict(type='ImageToTensor', keys=['img']),
            # dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
            # dict(type='ToDataContainer',
            #      fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
            dict(type='Collect', keys=['img']),
        ])
]

evaluation = dict(
    interval=1,
    metric='sgdet',
    relation_mode=True,
    classwise=True,
    iou_thrs=0.5,
    detection_method='pan_seg',
)

data = dict(samples_per_gpu=1,
            workers_per_gpu=2,
            train=dict(pipeline=train_pipeline),
            val=dict(pipeline=test_pipeline),
            test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.0001,
    paramwise_cfg=dict(custom_keys={
        'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    }))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))

# learning policy
lr_config = dict(policy='step', step=40)
runner = dict(type='EpochBasedRunner', max_epochs=60)

project_name = 'psgtr'
expt_name = 'psgtr_r101_psg'
work_dir = f'./work_dirs/{expt_name}'
checkpoint_config = dict(interval=2, max_keep_ckpts=10)

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        )
    ],
)

load_from = 'work_dirs/checkpoints/detr_pan_r101.pth'
7,589
31.715517
98
py
OpenPSG
OpenPSG-main/configs/psgtr/psgtr_r50_psg.py
_base_ = [
    '../_base_/models/psgtr_r50.py', '../_base_/datasets/psg.py',
    '../_base_/custom_runtime.py'
]

custom_imports = dict(imports=[
    'openpsg.models.frameworks.psgtr', 'openpsg.models.losses.seg_losses',
    'openpsg.models.relation_heads.psgtr_head', 'openpsg.datasets',
    'openpsg.datasets.pipelines.loading',
    'openpsg.datasets.pipelines.rel_randomcrop',
    'openpsg.models.relation_heads.approaches.matcher', 'openpsg.utils'
],
                      allow_failed_imports=False)

dataset_type = 'PanopticSceneGraphDataset'

# HACK:
object_classes = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
    'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
    'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
    'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
    'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
    'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
    'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
    'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
    'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
    'food-other-merged', 'building-other-merged', 'rock-merged',
    'wall-other-merged', 'rug-merged'
]

predicate_classes = [
    'over', 'in front of', 'beside', 'on', 'in', 'attached to',
    'hanging from', 'on back of', 'falling off', 'going down', 'painted on',
    'walking on', 'running on', 'crossing', 'standing on', 'lying on',
    'sitting on', 'flying over', 'jumping over', 'jumping from', 'wearing',
    'holding', 'carrying', 'looking at', 'guiding', 'kissing', 'eating',
    'drinking', 'feeding', 'biting', 'catching', 'picking', 'playing with',
    'chasing', 'climbing', 'cleaning', 'playing', 'touching', 'pushing',
    'pulling', 'opening', 'cooking', 'talking to', 'throwing', 'slicing',
    'driving', 'riding', 'parked on', 'driving on', 'about to hit',
    'kicking', 'swinging', 'entering', 'exiting', 'enclosing', 'leaning on',
]

model = dict(bbox_head=dict(
    num_classes=len(object_classes),
    num_relations=len(predicate_classes),
    object_classes=object_classes,
    predicate_classes=predicate_classes,
    use_mask=True,
    num_query=100,
), )

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)

# train_pipeline; NOTE: the img_scale and the Pad's size_divisor are
# different from the default settings in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadPanopticSceneGraphAnnotations',
         with_bbox=True,
         with_rel=True,
         with_mask=True,
         with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True)
            ],
            [
                dict(type='Resize',
                     img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True),
                dict(type='RelRandomCrop',
                     crop_type='absolute_range',
                     crop_size=(384, 600),
                     allow_negative_crop=False),  # no empty relations
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     override=True,
                     keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='RelsFormatBundle'),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_masks'])
]

# test_pipeline; NOTE: the Pad's size_divisor is different from the default
# setting (size_divisor=32), but it has little effect on performance either
# way.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # dict(type='LoadSceneGraphAnnotations', with_bbox=True, with_rel=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=1),
            dict(type='ImageToTensor', keys=['img']),
            # dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
            # dict(type='ToDataContainer',
            #      fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
            dict(type='Collect', keys=['img']),
        ])
]

evaluation = dict(
    interval=1,
    metric='sgdet',
    relation_mode=True,
    classwise=True,
    iou_thrs=0.5,
    detection_method='pan_seg',
)

data = dict(samples_per_gpu=1,
            workers_per_gpu=2,
            train=dict(pipeline=train_pipeline),
            val=dict(pipeline=test_pipeline),
            test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.0001,
    paramwise_cfg=dict(custom_keys={
        'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    }))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))

# learning policy
lr_config = dict(policy='step', step=40)
runner = dict(type='EpochBasedRunner', max_epochs=60)

project_name = 'psgformer'
expt_name = 'psgtr_r50_psg_0.5_scale_mask'
work_dir = f'./work_dirs/{expt_name}'
checkpoint_config = dict(interval=2, max_keep_ckpts=10)

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        )
    ],
)

load_from = 'work_dirs/checkpoints/detr_pan_r50.pth'
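
# Sanity note (derived from the lists above, not an official constant):
# len(object_classes) == 133 (80 COCO "thing" + 53 "stuff" classes) and
# len(predicate_classes) == 56; these override num_classes=80 and
# num_relations=117 inherited from the base model file.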
7,699
31.905983
98
py
OpenPSG
OpenPSG-main/configs/_base_/custom_runtime.py
checkpoint_config = dict(interval=1, max_keep_ckpts=1)

# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable

custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1), ('val', 1)]
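
# With this workflow, each cycle runs one training epoch followed by one
# validation epoch (standard mmcv Runner semantics).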
399
21.222222
54
py
OpenPSG
OpenPSG-main/configs/_base_/models/detr4seg_r101_psg.py
_base_ = [
    '../_base_/models/detr4seg_r101.py', '../_base_/datasets/psg.py',
    '../_base_/custom_runtime.py'
]

custom_imports = dict(imports=[
    'openpsg.models.frameworks.detr4seg',
    'openpsg.models.relation_heads.detr4seg_head', 'openpsg.datasets',
    'openpsg.datasets.pipelines.loading',
    'openpsg.datasets.pipelines.rel_randomcrop',
    'openpsg.models.relation_heads.approaches.matcher',
    'openpsg.models.losses.seg_losses'
],
                      allow_failed_imports=False)

object_classes = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
    'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
    'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
    'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
    'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
    'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
    'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
    'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
    'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
    'food-other-merged', 'building-other-merged', 'rock-merged',
    'wall-other-merged', 'rug-merged'
]

model = dict(bbox_head=dict(
    num_classes=len(object_classes),
    object_classes=object_classes,
))

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)

# train_pipeline; NOTE: the img_scale and the Pad's size_divisor are
# different from the default settings in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadSceneGraphAnnotations', with_bbox=True, with_rel=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True)
            ],
            [
                dict(type='Resize',
                     img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True),
                dict(type='RandomCrop',
                     crop_type='absolute_range',
                     crop_size=(384, 600),
                     allow_negative_crop=False),  # no empty relations
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     override=True,
                     keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='RelsFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]

# test_pipeline; NOTE: the Pad's size_divisor is different from the default
# setting (size_divisor=32), but it has little effect on performance either
# way.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=1),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img'])
         ])
]

data = dict(samples_per_gpu=2,
            workers_per_gpu=2,
            train=dict(pipeline=train_pipeline),
            val=dict(pipeline=test_pipeline),
            test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))

# learning policy
lr_config = dict(policy='step', step=110)
runner = dict(type='EpochBasedRunner', max_epochs=150)

project_name = 'detr4seg'
expt_name = 'detr4seg_r101_coco'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[dict(type='TextLoggerHook'),
           dict(type='TensorboardLoggerHook')],
)

load_from = '/mnt/ssd/gzj/test/OpenPSG/detr_r50_fb_origin.pth'
5,776
40.862319
78
py
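Rows like the one above are standard mmdet-style config files. A minimal sketch (assuming an mmcv/mmdet environment; the relative config path is illustrative) of how such a config is loaded and inspected after _base_ merging:

# Sketch: load an mmdet-style config and inspect the merged result.
# Assumes mmcv is installed and the repo layout matches the path above.
from mmcv import Config

cfg = Config.fromfile('configs/_base_/models/detr4seg_r101_psg.py')
print(cfg.model.bbox_head.num_classes)  # 133 PSG object classes
print(cfg.optimizer.lr)                 # 1e-4 AdamW base learning rate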
OpenPSG
OpenPSG-main/configs/_base_/models/psgtr_r50.py
model = dict(
    type='PSGTr',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    bbox_head=dict(
        type='PSGTrHead',
        num_classes=80,
        num_relations=117,
        in_channels=2048,
        transformer=dict(
            type='Transformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(type='MultiheadAttention',
                             embed_dims=256,
                             num_heads=8,
                             dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(type='MultiheadAttention',
                                   embed_dims=256,
                                   num_heads=8,
                                   dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn',
                                     'norm', 'ffn', 'norm')),
            )),
        positional_encoding=dict(type='SinePositionalEncoding',
                                 num_feats=128,
                                 normalize=True),
        sub_loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0, class_weight=1.0),
        sub_loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        sub_loss_iou=dict(type='GIoULoss', loss_weight=2.0),
        sub_focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
        sub_dice_loss=dict(type='psgtrDiceLoss', loss_weight=1.0),
        obj_loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0, class_weight=1.0),
        obj_loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        obj_loss_iou=dict(type='GIoULoss', loss_weight=2.0),
        obj_focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
        obj_dice_loss=dict(type='psgtrDiceLoss', loss_weight=1.0),
        rel_loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=2.0, class_weight=1.0)),
    # training and testing settings
    train_cfg=dict(assigner=dict(
        type='HTriMatcher',
        s_cls_cost=dict(type='ClassificationCost', weight=1.),
        s_reg_cost=dict(type='BBoxL1Cost', weight=5.0),
        s_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0),
        o_cls_cost=dict(type='ClassificationCost', weight=1.),
        o_reg_cost=dict(type='BBoxL1Cost', weight=5.0),
        o_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0),
        r_cls_cost=dict(type='ClassificationCost', weight=2.))),
    test_cfg=dict(max_per_img=100))
4,523
53.506024
77
py
OpenPSG
OpenPSG-main/configs/_base_/models/mask_rcnn_r50_fpn.py
# model settings
model = dict(
    type='MaskRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    neck=dict(type='FPN',
              in_channels=[256, 512, 1024, 2048],
              out_channels=256,
              num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(type='AnchorGenerator',
                              scales=[8],
                              ratios=[0.5, 1.0, 2.0],
                              strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                        target_means=[.0, .0, .0, .0],
                        target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True,
                      loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                            target_means=[0., 0., 0., 0.],
                            target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(type='FCNMaskHead',
                       num_convs=4,
                       in_channels=256,
                       conv_out_channels=256,
                       num_classes=80,
                       loss_mask=dict(type='CrossEntropyLoss', use_mask=True,
                                      loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(assigner=dict(type='MaxIoUAssigner',
                               pos_iou_thr=0.7,
                               neg_iou_thr=0.3,
                               min_pos_iou=0.3,
                               match_low_quality=True,
                               ignore_iof_thr=-1),
                 sampler=dict(type='RandomSampler',
                              num=256,
                              pos_fraction=0.5,
                              neg_pos_ub=-1,
                              add_gt_as_proposals=False),
                 allowed_border=-1,
                 pos_weight=-1,
                 debug=False),
        rpn_proposal=dict(nms_pre=2000,
                          max_per_img=1000,
                          nms=dict(type='nms', iou_threshold=0.7),
                          min_bbox_size=0),
        rcnn=dict(assigner=dict(type='MaxIoUAssigner',
                                pos_iou_thr=0.5,
                                neg_iou_thr=0.5,
                                min_pos_iou=0.5,
                                match_low_quality=True,
                                ignore_iof_thr=-1),
                  sampler=dict(type='RandomSampler',
                               num=512,
                               pos_fraction=0.25,
                               neg_pos_ub=-1,
                               add_gt_as_proposals=True),
                  mask_size=28,
                  pos_weight=-1,
                  debug=False)),
    test_cfg=dict(
        rpn=dict(nms_pre=1000,
                 max_per_img=1000,
                 nms=dict(type='nms', iou_threshold=0.7),
                 min_bbox_size=0),
        rcnn=dict(score_thr=0.05,
                  nms=dict(type='nms', iou_threshold=0.5),
                  max_per_img=100,
                  mask_thr_binary=0.5)))
5,776
52.490741
77
py
OpenPSG
OpenPSG-main/configs/_base_/models/detr4seg_r101.py
model = dict(
    type='DETR4seg',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')),
    bbox_head=dict(
        type='detr4segHead',
        num_classes=80,
        in_channels=2048,
        transformer=dict(
            type='Transformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(type='MultiheadAttention',
                             embed_dims=256,
                             num_heads=8,
                             dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(type='MultiheadAttention',
                                   embed_dims=256,
                                   num_heads=8,
                                   dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn',
                                     'norm', 'ffn', 'norm')),
            )),
        positional_encoding=dict(type='SinePositionalEncoding',
                                 num_feats=128,
                                 normalize=True),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                      loss_weight=1.0, class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0),
        dice_loss=dict(type='DiceLoss', loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(assigner=dict(
        type='HungarianAssigner',
        cls_cost=dict(type='ClassificationCost', weight=1.),
        reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
        iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))
3,394
51.230769
77
py
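The HungarianAssigner in the config above combines three weighted costs (classification, L1 box, GIoU) into one matrix and solves a one-to-one bipartite matching. A generic sketch of that final step, with random stand-in cost terms and the weights from this config:

# Sketch: combined-cost bipartite matching, as Hungarian-style assigners do.
import numpy as np
from scipy.optimize import linear_sum_assignment

num_queries, num_gts = 100, 7
cls_cost = np.random.rand(num_queries, num_gts)  # stand-in cost terms
reg_cost = np.random.rand(num_queries, num_gts)
iou_cost = np.random.rand(num_queries, num_gts)

cost = 1.0 * cls_cost + 5.0 * reg_cost + 2.0 * iou_cost  # config weights
query_idx, gt_idx = linear_sum_assignment(cost)  # matched pairs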
OpenPSG
OpenPSG-main/configs/_base_/models/detr4seg_r50.py
model = dict(
    type='DETR4seg',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    bbox_head=dict(
        type='detr4segHead',
        num_classes=80,
        in_channels=2048,
        transformer=dict(
            type='Transformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(type='MultiheadAttention',
                             embed_dims=256,
                             num_heads=8,
                             dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(type='MultiheadAttention',
                                   embed_dims=256,
                                   num_heads=8,
                                   dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn',
                                     'norm', 'ffn', 'norm')),
            )),
        positional_encoding=dict(type='SinePositionalEncoding',
                                 num_feats=128,
                                 normalize=True),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                      loss_weight=1.0, class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0),
        focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
        dice_loss=dict(type='psgtrDiceLoss', loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(assigner=dict(
        type='HungarianAssigner',
        cls_cost=dict(type='ClassificationCost', weight=1.),
        reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
        iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))
3,471
51.606061
77
py
OpenPSG
OpenPSG-main/configs/_base_/models/detr4seg_r50_psg.py
_base_ = ['./detr4seg_r50.py', '../datasets/psg.py', '../custom_runtime.py']
custom_imports = dict(imports=[
    'openpsg.models.frameworks.detr4seg',
    'openpsg.models.relation_heads.detr4seg_head', 'openpsg.datasets',
    'openpsg.datasets.pipelines.loading',
    'openpsg.datasets.pipelines.rel_randomcrop',
    'openpsg.models.relation_heads.approaches.matcher',
    'openpsg.models.losses.seg_losses'
], allow_failed_imports=False)

object_classes = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
    'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
    'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
    'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
    'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
    'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
    'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
    'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
    'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
    'food-other-merged', 'building-other-merged', 'rock-merged',
    'wall-other-merged', 'rug-merged'
]

model = dict(bbox_head=dict(
    num_classes=len(object_classes),
    object_classes=object_classes,
))

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)
# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different
# from the default setting in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadPanopticSceneGraphAnnotations',
         with_bbox=True,
         with_mask=True,
         with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True)
            ],
            [
                dict(type='Resize',
                     img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True),
                dict(type='RandomCrop',
                     crop_type='absolute_range',
                     crop_size=(384, 600),
                     allow_negative_crop=False),  # no empty relations
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     override=True,
                     keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='RelsFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
# test_pipeline, NOTE the Pad's size_divisor is different from the default
# setting (size_divisor=32). While there is little effect on the performance
# whether we use the default setting or use size_divisor=1.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=1),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img'])
         ])
]
data = dict(samples_per_gpu=1,
            workers_per_gpu=1,
            train=dict(pipeline=train_pipeline),
            val=dict(pipeline=test_pipeline),
            test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='AdamW',
                 lr=0.00001,
                 weight_decay=0.0001,
                 paramwise_cfg=dict(
                     custom_keys={
                         'backbone': dict(lr_mult=0.1, decay_mult=1.0),
                         'bbox_attention': dict(lr_mult=10.0, decay_mult=1.0),
                         'mask_head': dict(lr_mult=10.0, decay_mult=1.0)
                     }))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=8)
runner = dict(type='EpochBasedRunner', max_epochs=10)

evaluation = dict(interval=1, metric='PQ')
checkpoint_config = dict(interval=1, max_keep_ckpts=10)

project_name = 'detr4seg'
expt_name = 'test_detr4seg_r50_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ))
    ],
)
load_from = 'detr_pan_r50.pth'
6,341
40.45098
78
py
OpenPSG
OpenPSG-main/configs/_base_/models/psgtr_r101.py
_base_ = './psgtr_r50.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
147
23.666667
76
py
OpenPSG
OpenPSG-main/configs/_base_/models/panoptic_fpn_r101_fpn_psg.py
_base_ = './panoptic_fpn_r50_fpn_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

expt_name = 'panoptic_fpn_r101_fpn_psg'

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
298
32.222222
94
py
OpenPSG
OpenPSG-main/configs/_base_/models/panoptic_fpn_r50_fpn_psg.py
_base_ = [
    '../models/mask_rcnn_r50_fpn.py',
    '../datasets/psg_panoptic.py',
    '../schedules/schedule_1x.py',
    '../custom_runtime.py',
]

model = dict(
    type='PanopticFPN',
    semantic_head=dict(
        type='PanopticFPNHead',
        num_things_classes=80,
        num_stuff_classes=53,
        in_channels=256,
        inner_channels=128,
        start_level=0,
        end_level=4,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
        conv_cfg=None,
        loss_seg=dict(type='CrossEntropyLoss', ignore_index=255,
                      loss_weight=0.5),
    ),
    panoptic_fusion_head=dict(type='HeuristicFusionHead',
                              num_things_classes=80,
                              num_stuff_classes=53),
    test_cfg=dict(panoptic=dict(
        score_thr=0.6,
        max_per_img=100,
        mask_thr_binary=0.5,
        mask_overlap=0.5,
        nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
        stuff_area_limit=4096,
    )),
)

custom_hooks = []

# Change batch size and learning rate
data = dict(samples_per_gpu=8,
            # workers_per_gpu=2
            )
# optimizer = dict(lr=0.02)
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(_delete_=True,
                        grad_clip=dict(max_norm=35, norm_type=2))

lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=500,
                 warmup_ratio=1.0 / 3,
                 step=[8, 11])

project_name = 'openpsg'
expt_name = 'panoptic_fpn_r50_fpn_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth'
2,114
27.2
93
py
OpenPSG
OpenPSG-main/configs/_base_/models/detr_r50.py
model = dict(
    type='DETR',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    bbox_head=dict(
        type='DETRHead',
        num_classes=80,
        in_channels=2048,
        transformer=dict(
            type='Transformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(type='MultiheadAttention',
                             embed_dims=256,
                             num_heads=8,
                             dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(type='MultiheadAttention',
                                   embed_dims=256,
                                   num_heads=8,
                                   dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn',
                                     'norm', 'ffn', 'norm')),
            )),
        positional_encoding=dict(type='SinePositionalEncoding',
                                 num_feats=128,
                                 normalize=True),
        loss_cls=dict(type='CrossEntropyLoss',
                      bg_cls_weight=0.1,
                      use_sigmoid=False,
                      loss_weight=1.0,
                      class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(assigner=dict(
        type='HungarianAssigner',
        cls_cost=dict(type='ClassificationCost', weight=1.),
        reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
        iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))
3,360
50.707692
77
py
OpenPSG
OpenPSG-main/configs/_base_/schedules/schedule_1x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=500,
                 warmup_ratio=0.001,
                 step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
366
32.363636
72
py
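Under this step policy, the base rate is decayed by a factor of gamma (0.1 by default in mmcv) at each milestone epoch, after an iteration-level linear warmup. A plain-Python sketch of the epoch-level rate implied by the values above:

# Sketch: effective LR per epoch for lr=0.02, step=[8, 11], gamma=0.1.
# The 500-iter linear warmup (warmup_ratio=0.001) happens within epoch 0.
base_lr, gamma, steps = 0.02, 0.1, [8, 11]

def lr_at_epoch(epoch):
    # decay by gamma at each milestone that has been passed
    return base_lr * gamma ** sum(epoch >= s for s in steps)

for e in range(12):
    print(e, lr_at_epoch(e))  # 0.02 up to epoch 7, then 0.002, then 0.0002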
OpenPSG
OpenPSG-main/configs/_base_/schedules/schedule_3x.py
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=1000,
                 warmup_ratio=0.001,
                 step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
368
32.545455
72
py
OpenPSG
OpenPSG-main/configs/_base_/datasets/vg_sg.py
# dataset settings
dataset_type = 'SceneGraphDataset'
ann_file = '/mnt/ssd/gzj/data/VisualGenome/data_openpsg.json'
img_dir = '/mnt/ssd/gzj/data/VisualGenome/VG_100K'

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadSceneGraphAnnotations', with_bbox=True, with_rel=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SceneGraphFormatBundle'),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # Since the forward process may need gt info, annos must be loaded.
    dict(type='LoadSceneGraphAnnotations', with_bbox=True, with_rel=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            # NOTE: Do not change the img to DC.
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
            dict(type='ToDataContainer',
                 fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
        ])
]
data = dict(samples_per_gpu=2,
            workers_per_gpu=2,
            train=dict(type=dataset_type,
                       ann_file=ann_file,
                       img_prefix=img_dir,
                       pipeline=train_pipeline,
                       split='train'),
            val=dict(type=dataset_type,
                     ann_file=ann_file,
                     img_prefix=img_dir,
                     pipeline=test_pipeline,
                     split='test'),
            test=dict(type=dataset_type,
                      ann_file=ann_file,
                      img_prefix=img_dir,
                      pipeline=test_pipeline,
                      split='test'))
2,328
39.155172
74
py
OpenPSG
OpenPSG-main/configs/_base_/datasets/psg.py
# dataset settings
dataset_type = 'PanopticSceneGraphDataset'
# ann_file = './data/psg/psg.json' # full data, available after PSG challenge
ann_file = './data/psg/psg.json'  # './data/psg/psg_train_val.json' for PSG challenge development
# ann_file = './data/psg/psg_val_test.json' # for PSG challenge submission
coco_root = './data/coco'

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadPanopticSceneGraphAnnotations',
        with_bbox=True,
        with_rel=True,
        with_mask=True,
        with_seg=True,
    ),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 4),
    dict(type='SceneGraphFormatBundle'),
    dict(
        type='Collect',
        keys=[
            'img',
            'gt_bboxes',
            'gt_labels',
            'gt_rels',
            'gt_relmaps',
            'gt_masks',
            'gt_semantic_seg',
        ],
    ),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # Since the forward process may need gt info, annos must be loaded.
    dict(type='LoadPanopticSceneGraphAnnotations',
         with_bbox=True,
         with_rel=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            # NOTE: Do not change the img to DC.
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
            dict(
                type='ToDataContainer',
                fields=(dict(key='gt_bboxes'), dict(key='gt_labels')),
            ),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
        ],
    ),
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=ann_file,
        img_prefix=coco_root,
        seg_prefix=coco_root,
        pipeline=train_pipeline,
        split='train',
        all_bboxes=True,
    ),
    val=dict(
        type=dataset_type,
        ann_file=ann_file,
        img_prefix=coco_root,
        seg_prefix=coco_root,
        pipeline=test_pipeline,
        split='test',
        all_bboxes=True,
    ),
    test=dict(
        type=dataset_type,
        ann_file=ann_file,
        img_prefix=coco_root,
        seg_prefix=coco_root,
        pipeline=test_pipeline,
        split='test',
        all_bboxes=True,
    ),
)
2,861
28.8125
97
py
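A minimal sketch of turning this dataset config into a dataset object, assuming mmdet's standard registry plus the openpsg custom_imports (needed to register PanopticSceneGraphDataset) and the data paths from the config:

# Sketch: build the PSG training dataset from the config above.
from mmcv import Config
from mmdet.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/psg.py')
dataset = build_dataset(cfg.data.train)  # runs the train_pipeline lazily
print(len(dataset))                      # number of training images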
OpenPSG
OpenPSG-main/configs/_base_/datasets/psg_panoptic.py
# dataset settings
dataset_type = 'PanopticSceneGraphDataset'
ann_file = './data/psg/psg.json'
coco_root = './data/coco'

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadPanopticSceneGraphAnnotations',
        with_bbox=True,
        with_mask=True,
        with_seg=True,
    ),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 4),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg'],
    ),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ],
    ),
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=ann_file,
        img_prefix=coco_root,
        seg_prefix=coco_root,
        pipeline=train_pipeline,
        split='train',
    ),
    val=dict(
        type=dataset_type,
        ann_file=ann_file,
        img_prefix=coco_root,
        seg_prefix=coco_root,
        pipeline=test_pipeline,
        split='test',
    ),
    test=dict(
        type=dataset_type,
        ann_file=ann_file,
        img_prefix=coco_root,
        seg_prefix=coco_root,
        pipeline=test_pipeline,
        split='test',
    ),
)
evaluation = dict(interval=1, metric='PQ')
2,005
26.479452
78
py
OpenPSG
OpenPSG-main/configs/_base_/datasets/vg_detection.py
# dataset settings
custom_imports = dict(imports=[
    'openpsg.datasets',
    'openpsg.datasets.pipelines',
], allow_failed_imports=False)

dataset_type = 'SceneGraphDataset'
ann_file = 'data/vg/data_openpsg.json'
img_dir = 'data/vg/VG_100K'

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadSceneGraphAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img']),
         ])
]
data = dict(samples_per_gpu=2,
            workers_per_gpu=2,
            train=dict(type=dataset_type,
                       ann_file=ann_file,
                       img_prefix=img_dir,
                       pipeline=train_pipeline,
                       split='train'),
            val=dict(type=dataset_type,
                     ann_file=ann_file,
                     img_prefix=img_dir,
                     pipeline=test_pipeline,
                     split='test'),
            test=dict(type=dataset_type,
                      ann_file=ann_file,
                      img_prefix=img_dir,
                      pipeline=test_pipeline,
                      split='test'))
evaluation = dict(interval=1, metric='bbox')
2,016
34.385965
65
py
OpenPSG
OpenPSG-main/configs/_base_/datasets/psg_val.py
# dataset settings
dataset_type = 'PanopticSceneGraphDataset'
ann_file = 'data/psg/psg.json'
coco_root = 'data/coco'

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # Since the forward process may need gt info, annos must be loaded.
    dict(type='LoadPanopticSceneGraphAnnotations',
         with_bbox=True,
         with_rel=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            # NOTE: Do not change the img to DC.
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
            dict(
                type='ToDataContainer',
                fields=(dict(key='gt_bboxes'), dict(key='gt_labels')),
            ),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
        ],
    ),
]
data = dict(
    test=dict(
        type=dataset_type,
        ann_file=ann_file,
        img_prefix=coco_root,
        seg_prefix=coco_root,
        pipeline=test_pipeline,
        split='test',
        all_bboxes=True,
    ),
)
evaluation1 = dict(metric=['sgdet'],
                   relation_mode=True,
                   classwise=True,
                   iou_thrs=0.5,
                   detection_method='pan_seg')
evaluation2 = dict(metric=['PQ'],
                   relation_mode=True,
                   classwise=True,
                   iou_thrs=0.5,
                   detection_method='pan_seg')
1,793
29.931034
73
py
OpenPSG
OpenPSG-main/configs/psgformer/psgformer_r50_psg.py
_base_ = [
    './psgformer_r50.py', '../_base_/datasets/psg.py',
    '../_base_/custom_runtime.py'
]
find_unused_parameters = True
custom_imports = dict(imports=[
    'openpsg.models.frameworks.psgtr', 'openpsg.models.losses.seg_losses',
    'openpsg.models.frameworks.dual_transformer',
    'openpsg.models.relation_heads.psgformer_head', 'openpsg.datasets',
    'openpsg.datasets.pipelines.loading',
    'openpsg.datasets.pipelines.rel_randomcrop',
    'openpsg.models.relation_heads.approaches.matcher', 'openpsg.utils'
], allow_failed_imports=False)

dataset_type = 'PanopticSceneGraphDataset'

# HACK:
object_classes = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
    'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
    'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
    'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
    'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
    'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
    'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
    'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
    'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
    'food-other-merged', 'building-other-merged', 'rock-merged',
    'wall-other-merged', 'rug-merged'
]

predicate_classes = [
    'over', 'in front of', 'beside', 'on', 'in', 'attached to',
    'hanging from', 'on back of', 'falling off', 'going down', 'painted on',
    'walking on', 'running on', 'crossing', 'standing on', 'lying on',
    'sitting on', 'flying over', 'jumping over', 'jumping from', 'wearing',
    'holding', 'carrying', 'looking at', 'guiding', 'kissing', 'eating',
    'drinking', 'feeding', 'biting', 'catching', 'picking', 'playing with',
    'chasing', 'climbing', 'cleaning', 'playing', 'touching', 'pushing',
    'pulling', 'opening', 'cooking', 'talking to', 'throwing', 'slicing',
    'driving', 'riding', 'parked on', 'driving on', 'about to hit', 'kicking',
    'swinging', 'entering', 'exiting', 'enclosing', 'leaning on',
]

model = dict(bbox_head=dict(
    num_classes=len(object_classes),
    num_relations=len(predicate_classes),
    object_classes=object_classes,
    predicate_classes=predicate_classes,
    num_obj_query=100,
    num_rel_query=100,
), )

img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True)
# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different
# from the default setting in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadPanopticSceneGraphAnnotations',
         with_bbox=True,
         with_rel=True,
         with_mask=True,
         with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True)
            ],
            [
                dict(type='Resize',
                     img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                     multiscale_mode='value',
                     keep_ratio=True),
                dict(type='RelRandomCrop',
                     crop_type='absolute_range',
                     crop_size=(384, 600),
                     allow_negative_crop=False),  # no empty relations
                dict(type='Resize',
                     img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                     multiscale_mode='value',
                     override=True,
                     keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='RelsFormatBundle'),
    dict(type='Collect',
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_masks'])
]
# test_pipeline, NOTE the Pad's size_divisor is different from the default
# setting (size_divisor=32). While there is little effect on the performance
# whether we use the default setting or use size_divisor=1.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadSceneGraphAnnotations', with_bbox=True, with_rel=True),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize', **img_norm_cfg),
             dict(type='Pad', size_divisor=1),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
             dict(type='ToDataContainer',
                  fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
             dict(type='Collect', keys=['img']),
         ])
]

evaluation = dict(
    interval=1,
    metric='sgdet',
    relation_mode=True,
    classwise=True,
    iou_thrs=0.5,
    detection_method='pan_seg',
)

data = dict(samples_per_gpu=1,
            workers_per_gpu=2,
            train=dict(pipeline=train_pipeline),
            val=dict(pipeline=test_pipeline),
            test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.001,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1, decay_mult=1.0),
            'transformer.encoder': dict(lr_mult=0.1, decay_mult=1.0),
            'transformer.decoder1': dict(lr_mult=0.1, decay_mult=1.0),
            'obj_query_embed': dict(lr_mult=0.1, decay_mult=1.0),
            'input_proj': dict(lr_mult=0.1, decay_mult=1.0),
            'class_embed': dict(lr_mult=0.1, decay_mult=1.0),
            'box_embed': dict(lr_mult=0.1, decay_mult=1.0),
            'bbox_attention': dict(lr_mult=0.1, decay_mult=1.0),
            'mask_head': dict(lr_mult=0.1, decay_mult=1.0),
        }))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=40)
runner = dict(type='EpochBasedRunner', max_epochs=60)

project_name = 'psgformer'
expt_name = 'psgformer_r50_psg'
work_dir = f'./work_dirs/{expt_name}'
checkpoint_config = dict(interval=1, max_keep_ckpts=15)

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        )
    ],
)

load_from = './work_dirs/checkpoints/detr4psgformer_r50.pth'
8,231
32.6
78
py
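The paramwise_cfg above scales the base rate per module. A sketch of the effective rates it implies, assuming mmcv's custom_keys semantics (a key matches any parameter whose name contains it, and the most specific match should win; simplified to first-match here):

# Sketch: effective learning rates implied by the optimizer config above.
base_lr = 0.001
custom_keys = {
    'backbone': 0.1, 'transformer.encoder': 0.1, 'transformer.decoder1': 0.1,
    'obj_query_embed': 0.1, 'input_proj': 0.1, 'class_embed': 0.1,
    'box_embed': 0.1, 'bbox_attention': 0.1, 'mask_head': 0.1,
}

def effective_lr(param_name):
    # parameters outside the listed modules keep the base learning rate
    mults = [m for key, m in custom_keys.items() if key in param_name]
    return base_lr * (mults[0] if mults else 1.0)

print(effective_lr('backbone.layer1.conv1.weight'))      # 1e-4
print(effective_lr('bbox_head.rel_query_embed.weight'))  # 1e-3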
OpenPSG
OpenPSG-main/configs/psgformer/psgformer_r101_psg.py
_base_ = './psgformer_r50_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

# learning policy
lr_config = dict(policy='step', step=48)
runner = dict(type='EpochBasedRunner', max_epochs=60)

project_name = 'psgformer'
expt_name = 'psgformer_r101_psg'
work_dir = f'./work_dirs/{expt_name}'
checkpoint_config = dict(interval=12, max_keep_ckpts=10)

load_from = './work_dirs/checkpoints/detr4psgformer_r101.pth'
488
27.764706
76
py
OpenPSG
OpenPSG-main/configs/psgformer/psgformer_r50.py
model = dict(
    type='PSGTr',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50')),
    bbox_head=dict(
        type='PSGFormerHead',
        num_classes=80,
        num_relations=117,
        in_channels=2048,
        transformer=dict(
            type='DualTransformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(type='MultiheadAttention',
                             embed_dims=256,
                             num_heads=8,
                             dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder1=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(type='MultiheadAttention',
                                   embed_dims=256,
                                   num_heads=8,
                                   dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn',
                                     'norm', 'ffn', 'norm'))),
            decoder2=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(type='MultiheadAttention',
                                   embed_dims=256,
                                   num_heads=8,
                                   dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn',
                                     'norm', 'ffn', 'norm'))),
        ),
        positional_encoding=dict(type='SinePositionalEncoding',
                                 num_feats=128,
                                 normalize=True),
        rel_loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                          loss_weight=2.0, class_weight=1.0),
        sub_id_loss=dict(type='MultilabelCrossEntropy', loss_weight=2.0),
        obj_id_loss=dict(type='MultilabelCrossEntropy', loss_weight=2.0),
        loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False,
                      loss_weight=4.0, class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=3.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0),
        focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
        dice_loss=dict(type='psgtrDiceLoss', loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        id_assigner=dict(
            type='IdMatcher',
            sub_id_cost=dict(type='ClassificationCost', weight=1.),
            obj_id_cost=dict(type='ClassificationCost', weight=1.),
            r_cls_cost=dict(type='ClassificationCost', weight=1.)),
        bbox_assigner=dict(
            type='HungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=4.0),
            reg_cost=dict(type='BBoxL1Cost', weight=3.0),
            iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))
5,041
50.979381
79
py
OpenPSG
OpenPSG-main/configs/motifs/panoptic_fpn_r50_fpn_1x_sgdet_psg.py
_base_ = [
    './panoptic_fpn_r50_fpn_1x_predcls_psg.py',
]

model = dict(
    relation_head=dict(
        head_config=dict(
            # NOTE: Evaluation type
            use_gt_box=False,
            use_gt_label=False,
        ),
    ),
    roi_head=dict(bbox_head=dict(type='SceneGraphBBoxHead'), ),
)

evaluation = dict(interval=1,
                  metric='sgdet',
                  relation_mode=True,
                  classwise=True,
                  iou_thrs=0.5,
                  detection_method='pan_seg')

# Change batch size and learning rate
data = dict(samples_per_gpu=8,
            # workers_per_gpu=2
            )

# Log config
project_name = 'openpsg'
expt_name = 'motifs_panoptic_fpn_r50_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        ),
    ],
)
1,038
22.088889
63
py
OpenPSG
OpenPSG-main/configs/motifs/panoptic_fpn_r101_fpn_1x_sgdet_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_sgdet_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'motifs_panoptic_fpn_r101_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
764
25.37931
94
py
OpenPSG
OpenPSG-main/configs/motifs/panoptic_fpn_r50_fpn_1x_predcls_psg.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/psg.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/custom_runtime.py',
]

find_unused_parameters = True

dataset_type = 'PanopticSceneGraphDataset'

# HACK:
object_classes = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
    'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
    'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
    'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
    'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
    'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
    'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
    'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
    'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
    'food-other-merged', 'building-other-merged', 'rock-merged',
    'wall-other-merged', 'rug-merged'
]

predicate_classes = [
    'over', 'in front of', 'beside', 'on', 'in', 'attached to',
    'hanging from', 'on back of', 'falling off', 'going down', 'painted on',
    'walking on', 'running on', 'crossing', 'standing on', 'lying on',
    'sitting on', 'flying over', 'jumping over', 'jumping from', 'wearing',
    'holding', 'carrying', 'looking at', 'guiding', 'kissing', 'eating',
    'drinking', 'feeding', 'biting', 'catching', 'picking', 'playing with',
    'chasing', 'climbing', 'cleaning', 'playing', 'touching', 'pushing',
    'pulling', 'opening', 'cooking', 'talking to', 'throwing', 'slicing',
    'driving', 'riding', 'parked on', 'driving on', 'about to hit', 'kicking',
    'swinging', 'entering', 'exiting', 'enclosing', 'leaning on',
]

model = dict(
    type='SceneGraphPanopticFPN',
    semantic_head=dict(
        type='PanopticFPNHead',
        num_things_classes=80,
        num_stuff_classes=53,
        in_channels=256,
        inner_channels=128,
        start_level=0,
        end_level=4,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
        conv_cfg=None,
        loss_seg=dict(type='CrossEntropyLoss', ignore_index=255,
                      loss_weight=0.5),
    ),
    panoptic_fusion_head=dict(type='HeuristicFusionHead',
                              num_things_classes=80,
                              num_stuff_classes=53),
    test_cfg=dict(panoptic=dict(
        score_thr=0.6,
        max_per_img=100,
        mask_thr_binary=0.5,
        mask_overlap=0.5,
        nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
        stuff_area_limit=4096,
    )),
    relation_head=dict(
        type='MotifHead',
        object_classes=object_classes,
        predicate_classes=predicate_classes,
        num_classes=len(object_classes) + 1,  # with background class
        num_predicates=len(predicate_classes) + 1,
        use_bias=False,  # NOTE: whether to use frequency bias
        head_config=dict(
            # NOTE: Evaluation type
            use_gt_box=True,
            use_gt_label=True,
            use_vision=True,
            embed_dim=200,
            hidden_dim=512,
            roi_dim=1024,
            context_pooling_dim=4096,
            dropout_rate=0.2,
            context_object_layer=1,
            context_edge_layer=1,
            glove_dir='data/glove/',
            causal_effect_analysis=False,
        ),
        bbox_roi_extractor=dict(
            type='VisualSpatialExtractor',
            bbox_roi_layer=dict(type='RoIAlign', output_size=7,
                                sampling_ratio=2),
            with_visual_bbox=True,
            with_visual_mask=False,
            with_visual_point=False,
            with_spatial=False,
            in_channels=256,
            fc_out_channels=1024,
            featmap_strides=[4, 8, 16, 32],
        ),
        relation_roi_extractor=dict(
            type='VisualSpatialExtractor',
            bbox_roi_layer=dict(type='RoIAlign', output_size=7,
                                sampling_ratio=2),
            with_visual_bbox=True,
            with_visual_mask=False,
            with_visual_point=False,
            with_spatial=True,
            separate_spatial=False,
            in_channels=256,
            fc_out_channels=1024,
            featmap_strides=[4, 8, 16, 32],
        ),
        relation_sampler=dict(
            type='Motif',
            pos_iou_thr=0.5,
            require_overlap=False,  # for sgdet training, not require
            num_sample_per_gt_rel=4,
            num_rel_per_image=1024,
            pos_fraction=0.25,
            # NOTE: To only include overlapping bboxes?
            test_overlap=False,  # for testing
        ),
        loss_object=dict(type='CrossEntropyLoss', use_sigmoid=False,
                         loss_weight=1.0),
        loss_relation=dict(type='CrossEntropyLoss', use_sigmoid=False,
                           loss_weight=1.0),
    ),
)

custom_hooks = []

# To freeze modules
freeze_modules = [
    'backbone',
    'neck',
    'rpn_head',
    'roi_head',
    'semantic_head',
    'panoptic_fusion_head',
]

evaluation = dict(interval=1,
                  metric='predcls',
                  relation_mode=True,
                  classwise=True)

# Change batch size and learning rate
data = dict(samples_per_gpu=16, )
# optimizer = dict(lr=0.003)
optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(_delete_=True,
                        grad_clip=dict(max_norm=35, norm_type=2))

lr_config = dict(policy='step',
                 warmup='linear',
                 warmup_iters=500,
                 warmup_ratio=1.0 / 3,
                 step=[7, 10])

# Log config
project_name = 'openpsg'
expt_name = 'motifs_panoptic_fpn_r50_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth'
7,468
29.863636
93
py
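freeze_modules in the config above names submodules whose weights stay fixed while the relation head trains. A generic PyTorch sketch of what such freezing amounts to (the actual hook OpenPSG uses to apply this list is not shown in this row):

# Sketch: freeze named submodules by disabling gradients and keeping them
# in eval mode so BatchNorm statistics also stay fixed.
def freeze(model, module_names):
    for name in module_names:
        module = getattr(model, name)
        module.eval()
        for p in module.parameters():
            p.requires_grad = False

# freeze(detector, ['backbone', 'neck', 'rpn_head', 'roi_head',
#                   'semantic_head', 'panoptic_fusion_head'])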
OpenPSG
OpenPSG-main/configs/motifs/panoptic_fpn_r101_fpn_1x_predcls_psg.py
_base_ = './panoptic_fpn_r50_fpn_1x_predcls_psg.py'

model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'motifs_panoptic_fpn_r101_fpn_1x_predcls_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'
768
25.517241
94
py
epsnoise
epsnoise-master/setup.py
from setuptools import setup

setup(name="epsnoise",
      description="Simulate pixel noise for weak-lensing ellipticity measurements",
      long_description="Simulate pixel noise for weak-lensing ellipticity measurements",
      version="0.1",
      license="MIT",
      author="Peter Melchior",
      author_email="peter.m.melchior@gmail.com",
      py_modules=["epsnoise"],
      url="https://github.com/pmelchior/epsnoise",
      requires=["numpy", "scipy"]
      )
467
30.2
88
py
epsnoise
epsnoise-master/epsnoise.py
#!/bin/env python
from numpy import cos, sin, exp, angle, real, conj
from numpy.random import normal, random, rayleigh
from math import pi, sqrt, tanh
from scipy.special import erf
from scipy.optimize import fmin


def chi2eps(chi):
    """Calculate epsilon ellipticity from chi.

    Args:
        chi: a real or complex number, or a numpy array thereof.

    Returns:
        The epsilon ellipticity in the same shape as the input.
    """
    return chi / (1 + (1 - abs(chi)**2 + 0j)**0.5)


def eps2chi(eps):
    """Calculate chi ellipticity from epsilon.

    Args:
        eps: a real or complex number, or a numpy array thereof.

    Returns:
        The chi ellipticity in the same shape as the input.
    """
    return 2*eps/(1 + abs(eps)**2)


def addNoise(eps, nu, transform_eps=True):
    """Add noise to the ellipticity.

    Calculates the moments of a Gaussian-shaped galaxy with given ellipticity
    eps and their errors, assuming a correlation of the symmetric moments
    Q_11 and Q_22 of rho_n = 1/3. Samples from a Gaussian noise distribution,
    such that the significance equals nu, then returns the ratio of the noisy
    moments. If transform_eps = True, calls chi2eps on the result, i.e.
    returns epsilon ellipticity instead of chi.

    Args:
        eps: a real or complex number, or a numpy array thereof.
        nu: a real number or a numpy array with the same shape as eps.
        transform_eps: whether the result is returned as epsilon ellipticity
            rather than chi.

    Returns:
        The noisy ellipticity measurement with the same shape as eps.
    """
    # rotate into semi-major axis frame
    e = abs(eps)
    chi = eps2chi(e)
    w = 1  # w=s^2, but we can set s=1
    z = chi*w
    S = len(e)
    sigma_n = ((1+e)*(1-e))**0.5/(nu*pi**0.5)  # for flux=1
    sigma_11 = sigma_n * (3*pi/4/((1-e)**5 * (1+e)))**0.5
    sigma_12 = sigma_n * (pi/4/((1+e)**3 * (1-e)**3))**0.5
    sigma_22 = sigma_n * (3*pi/4/((1+e)**5 * (1-e)))**0.5
    dQ11 = normal(0, 1, S)  # actual variance enters below
    dQ22 = normal(0, 1, S)
    dQ12 = normal(0, sigma_12, S)
    # Q11 and Q22 are correlated with rho = 1/3
    # need to take into account that variances of Q11 and Q22 change with e
    rho = 1./3
    dQ22 = (rho*sigma_22)*dQ11 + ((sigma_22**2 - (rho*sigma_22)**2)**0.5)*dQ22
    dQ11 *= sigma_11
    # now w and z become correlated
    w_ = w + dQ11 + dQ22
    z_ = z + dQ11 - dQ22
    chi1_ = z_/w_
    chi2_ = 2*dQ12/w_
    phi = angle(eps)
    chi_ = chi1_*cos(phi) - chi2_*sin(phi) + 1j*(chi1_*sin(phi) + chi2_*cos(phi))
    if transform_eps:
        return chi2eps(chi_)
    else:
        return chi_


def sampleEllipticity(size, sigma_e=0.3):
    """Draw ellipticity samples from the Rayleigh distribution.

    Samples from the Rayleigh distribution of width sigma_e, but makes sure
    that no ellipticities with |epsilon| >= 1 are created by resampling the
    outliers. The orientation of the sample is uniform in the interval
    [0 .. pi).

    Args:
        size: A positive integer.
        sigma_e: The width parameter/mode of the Rayleigh distribution.

    Returns:
        A complex numpy array of given size.
    """
    e = rayleigh(sigma_e, size)
    # make sure no outliers are created
    # this effectively tightens the distribution
    mask = (e >= 1)
    while sum(mask) > 0:
        e[mask] = rayleigh(sigma_e, sum(mask))
        mask = (e >= 1)
    # sample uniformly random orientation and create complex ellipticity
    phi = pi*random(size)
    return e*(cos(2*phi) + 1j * sin(2*phi))


def addShear(eps, g):
    """Add shear to complex ellipticity.

    Args:
        eps: The complex ellipticity epsilon as a real or complex number or
            a numpy array thereof.
        g: The shear as a real or complex number or a numpy array with the
            same shape as eps.

    Returns:
        The sheared ellipticity with the same shape as eps.
    """
    return (eps + g)/(1 + eps*conj(g))


## Analytic form of Marsaglia distribution

def marsaglia_f(t, a, b):
    """Compute the distribution of t = (a+x)/(b+y) with x,y ~ N(0,1).

    Args:
        t: A real number or a numpy array thereof.
        a: A real number or a numpy array with the same shape as t.
        b: A real number or a numpy array with the same shape as t.

    Returns:
        The value of the distribution at each value of t.
    """
    q = (b + a*t)/(1+t**2)**0.5
    return exp(-(a*a + b*b)/2)/(pi*(1 + t**2)) * (1 + q*exp(q**2/2)*sqrt(pi/2)*erf(sqrt(0.5)*q))


def marsaglia(t, mu_w, mu_z, sigma_w, sigma_z, rho):
    """Compute the value of the Marsaglia distribution p_M(t).

    Transforms the ratio of t=w/z, where w,z are drawn from a bivariate
    Gaussian distribution with variances sigma_w and sigma_z and correlation
    rho, into the form of (a+x)/(b+y) and evaluates marsaglia_f(t, a, b).

    Args:
        t: A real number or a numpy array thereof.
        mu_w: The mean of w as a real number.
        mu_z: The mean of z as a real number.
        sigma_w: The dispersion of w as a real number.
        sigma_z: The dispersion of z as a real number.
        rho: The correlation between w and z as a real number. Assumed to be
            in the range [0 .. 1).

    Returns:
        The value of p_M(t) at each value of t.
    """
    s = rho * sigma_z/sigma_w
    r = sigma_w/(sigma_z*(1-rho*rho)**0.5)
    a = (mu_z/sigma_z - rho*(mu_w/sigma_w))/(1-rho*rho)**0.5
    b = mu_w/sigma_w
    return r * marsaglia_f(r*(t-s), a, b)


def marsaglia_eps(t, eps, nu):
    """Compute the Marsaglia distribution for the ellipticity chi.

    Calculates the moments of a Gaussian-shaped galaxy with given ellipticity
    eps and their errors, assuming a correlation of the symmetric moments
    Q_11 and Q_22 of rho_n = 1/3, such that the image has significance nu.
    Returns the Marsaglia distribution for the ratio z/w, i.e. for the
    complex ellipticity chi.

    Args:
        t: A real number or a numpy array thereof.
        eps: The ellipticity epsilon as a real or complex number.
        nu: The image significance as a positive number.

    Returns:
        The Marsaglia distribution of chi, given the true value eps and the
        significance nu.
    """
    e = abs(eps)
    chi = eps2chi(e)
    w = 1  # w=s^2, but we can set s=1
    z = chi*w
    sigma_n = ((1+e)*(1-e))**0.5/(nu*pi**0.5)  # F = 1 here
    sigma_11 = sigma_n * (3*pi/4/((1-e)**5 * (1+e)))**0.5
    sigma_12 = sigma_n * (pi/4/((1+e)**3 * (1-e)**3))**0.5
    sigma_22 = sigma_n * (3*pi/4/((1+e)**5 * (1-e)))**0.5
    # Q11 and Q22 are correlated with rho=1/3
    rho = 1./3
    sigma_w = sqrt(sigma_11**2 + sigma_22**2 + 2*rho*sigma_11*sigma_22)
    sigma_z = sqrt(sigma_11**2 + sigma_22**2 - 2*rho*sigma_11*sigma_22)
    rho = (sigma_11**2 - sigma_22**2)/(sigma_z*sigma_w)
    return marsaglia(t, w, z, sigma_w, sigma_z, rho)


## Shear estimators from section 4

def epsilon_mean(eps, limit=0.999):
    """Compute the mean of the ellipticity distribution.

    Args:
        eps: A numpy array of real or complex ellipticity (epsilon) estimates.
        limit: The truncation limit, a positive number.

    Returns:
        The mean of the eps samples, subject to the requirement |eps| < limit.
    """
    mask = (abs(eps) < limit)
    return eps[mask].mean()


def chi_responsivity(chi, limit=2.):
    """Compute shear from a sample of chi ellipticities.

    As chi is not an unbiased estimator of the shear, the responsivity
    correction 1 - chi.std()**2 is applied.

    Args:
        chi: A numpy array of real or complex ellipticity (chi) estimates.
        limit: The truncation limit, a positive number.

    Returns:
        The mean of the chi samples, subject to the requirement |chi| < limit,
        corrected for the responsivity of the sample.
    """
    mask = (abs(chi) < limit)
    return chi[mask].mean()/(2-chi[mask].std()**2)


def chi_s_mean(gamma, chi):
    """Calculate the absolute value of the estimated source-plane ellipticity.

    Args:
        gamma: A list of the two shear components.
        chi: A numpy array of complex ellipticity (chi) estimates.

    Returns:
        The absolute value of the sum of residual source-plane ellipticities.
    """
    g = gamma[0] + 1j*gamma[1]
    return abs(((chi - 2*g + g**2 * conj(chi))/(1+abs(g)**2 - 2*real(g*conj(chi)))).sum())


def chi_shear(chi):
    """Compute the shear estimator that nulls the residual source-plane ellipticity.

    Runs a minimizer, initialized at (0,0), for the estimated shear, such
    that the de-lensing of the given sample of chi estimates yields a
    maximally isotropic distribution.

    Args:
        chi: A numpy array of complex ellipticity (chi) estimates.

    Returns:
        The shear estimate with the smallest residual source-plane
        ellipticity.
    """
    gamma = [0, 0]
    gamma = fmin(chi_s_mean, gamma, args=(chi,), xtol=1e-8, disp=False)
    return gamma[0] + 1j*gamma[1]
9,017
33.288973
96
py
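A short end-to-end sketch using only the functions defined in this module; the sample size, shear value, and significance below are illustrative, not values from the source:

# Sketch: simulate a sheared, noisy ellipticity sample and recover the
# shear with the module's own estimators.
import epsnoise

n, g, nu = 100000, 0.05 + 0.02j, 15
eps = epsnoise.sampleEllipticity(n, sigma_e=0.3)   # intrinsic shapes
eps_sheared = epsnoise.addShear(eps, g)            # apply the shear
chi_noisy = epsnoise.addNoise(eps_sheared, nu, transform_eps=False)

print(epsnoise.chi_responsivity(chi_noisy))  # responsivity-corrected mean
print(epsnoise.chi_shear(chi_noisy))         # null-test estimator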
pyGPCCA
pyGPCCA-main/setup.py
from pathlib import Path

from setuptools import setup, find_packages

try:
    from pygpcca import __email__, __author__, __version__, __maintainer__
except ImportError:
    __author__ = __maintainer__ = "Bernhard Reuter"
    __version__ = "1.0.4"
    __email__ = "bernhard-reuter@gmx.de"

setup(
    name="pygpcca",
    use_scm_version=True,
    setup_requires=["setuptools_scm"],
    version=__version__,
    author=__author__,
    author_email=__email__,
    maintainer=__maintainer__,
    maintainer_email=__email__,
    description=Path("README.rst").read_text("utf-8").splitlines()[34],
    long_description="\n".join(Path("README.rst").read_text("utf-8").splitlines()[:-4]).replace("|br|", "\n"),
    long_description_content_type="text/x-rst; charset=UTF-8",
    url="https://github.com/msmdev/pygpcca",
    download_url="https://pypi.org/project/pygpcca/",
    project_urls={
        "Documentation": "https://pygpcca.readthedocs.io/en/latest",
        "Source Code": "https://github.com/msmdev/pygpcca",
    },
    license="LGPLv3+",
    platforms=["Linux", "MacOSX"],
    packages=find_packages(),
    zip_safe=False,
    install_requires=[line.strip() for line in Path("requirements.txt").read_text("utf-8").splitlines()],
    extras_require=dict(
        # https://gitlab.com/petsc/petsc/-/issues/803
        slepc=[
            "mpi4py>=3.0.3",
            "petsc>=3.18.0",
            "slepc>=3.18.0",
            "petsc4py>=3.18.0",
            "slepc4py>=3.18.0",
        ],
        dev=["pre-commit>=2.9.0", "bump2version"],
        test=["tox>=3.20.1"],
        docs=[
            line.strip()
            for line in (Path("docs") / "requirements.txt").read_text("utf-8").splitlines()
            if not line.startswith("-r")
        ],
    ),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "Natural Language :: English",
        "License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS :: MacOS X",
        "Typing :: Typed",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Topic :: Scientific/Engineering :: Mathematics",
    ],
    keywords=sorted(
        {
            "GPCCA",
            "G-PCCA",
            "Generalized Perron Cluster Cluster Analysis",
            "Markov state model",
            "Markov state modeling",
            "coarse-graining",
            "spectral clustering",
            "non-equilibrium system",
            "non-reversible process",
            "non-autonomous process",
            "cyclic states",
            "metastable states",
            "molecular dynamics",
            "cellular dynamics",
            "molecular kinetics",
            "cellular kinetics",
            "Schur decomposition",
            "Schur vectors",
        }
    ),
)
3,114
33.611111
110
py
pyGPCCA
pyGPCCA-main/pygpcca/_sort_real_schur.py
# Python version (translated by Fabian Paul; revised by Bernhard Reuter, Michal Klein) of the following original work:
# --------------------------------------------------------------------------------------------------------------------
# Title: Sorting Real Schur Forms
# Author: Jan Brandts
# E-Mail: brandts-AT-science.uva.nl
# http://m2matlabdb.ma.tum.de/download.jsp?MC_ID=3&MP_ID=119
# http://dx.doi.org/10.1002/nla.274
# Institution: University of Amsterdam
# Description: In Matlab 6, there exists a command to generate a real Schur form,
# whereas another transforms a real Schur form into a complex one.
# There do not exist commands to prescribe the order in which the eigenvalues appear on the diagonal of the upper
# (quasi-) triangular factor T. For the complex case, a routine is sketched in Golub and Van Loan (1996)
# that orders the diagonal entries of T according to their distance to a target value.
# In the reference below, we give a Matlab routine to sort real Schur forms in Matlab.
# It is based on a block-swapping procedure by Bai and Demmel (1993).
# Sorting real Schur forms, both partially and completely,
# has important applications in the computation of real invariant subspaces.
# Reference: J.H. Brandts. Matlab code for sorted real Schur forms.
# Numerical Linear Algebra with Applications 9(3):249-261 (2002)
# Keywords: Real Schur Form, sorting, Bai-Demmel algorithm, swapping
# Based on the original Matlab File Version: 1.0
# --------------------------------------------------------------------------------------------------------------------
# All references to equations or pages made in the comments are referencing
# Jan Brandts. Matlab Code for Sorted Real Schur Forms. Preprint No. 1180,
# January, 2001, Universiteit Utrecht,
# https://www.math.uu.nl/publications/preprints/1180.pdf
# --------------------------------------------------------------------------------------------------------------------
from typing import List, Tuple, Union, Literal

import numpy as np

from pygpcca._types import ArrayLike
from pygpcca.utils._docs import d

__all__ = ["sort_real_schur"]
expensive_asserts = False


@d.dedent
def sort_real_schur(
    Q: ArrayLike, R: ArrayLike, z: Literal["LM", "LR"], b: float, inplace: bool = False
) -> Tuple[ArrayLike, ArrayLike, List[float]]:
    r"""
    Partially or completely sort the real Schur form `R` and Schur vectors `Q` of a square matrix `A`.

    The diagonal blocks of `R` will be ordered with respect to a target `z`.

    The blocks on the diagonal are associated with either real eigenvalues,
    in case of 1x1 blocks, or pairs of complex eigenvalues, in case of 2x2 blocks.

    The number of ordered blocks is determined by a parameter `b`.
    A vector `ap` warns of inaccuracy of the solution if an entry of `ap` exceeds one.

    This function is based on MATLAB code originally published by Brandts [Brandts02]_.

    Parameters
    ----------
    Q
        Orthogonal real matrix `Q` of Schur vectors such that :math:`AQ = QR`.
    R
        Quasi-triangular real Schur form `R` such that :math:`AQ = QR`.
    %(z)s
    b
        Determines the length of the ordering with respect to `z`.
        Valid options are:

            - ``b < 0``: ``-b`` blocks will be sorted.
            - ``b > 0``: ``b`` or ``b + 1`` eigenvalues will be sorted, depending on the sizes of the blocks.
            - ``b = 0``: the whole Schur form will be sorted.
    inplace
        Determines if the supplied `Q` and `R` matrices are sorted in place (``inplace = True``)
        or if copies are made and sorted (``inplace = False``; default).
    Returns
    -------
    Tuple of the following:

    - Q : orthogonal real `(n, n)` Schur vector matrix `Q` such that :math:`AQ = QR`
      with the diagonal blocks ordered with respect to the target `z`.
    - R : quasi-triangular real `(n, n)` Schur matrix `R` such that :math:`AQ = QR`
      with the diagonal blocks ordered with respect to the target `z`.
    - ap : A list `ap` warns of inaccuracy of the solution if an entry of `ap` exceeds one.
    """  # noqa: D401
    eps = np.finfo(R.dtype).eps
    if not np.all(np.abs(np.tril(R, -2)) <= 100 * eps):
        raise ValueError("R is not block-triangular.")

    if not inplace:
        Q = Q.copy()
        R = R.copy()

    r = np.where(np.abs(np.diag(R, -1)) > 100 * eps)[0]  # detect sub-diagonal nonzero entries
    s = [
        i for i in range(R.shape[0] + 1) if i not in r + 1
    ]  # construct from them a vector s with the top-left positions of each block

    p = np.empty((len(s) - 1,), dtype=np.complex128)

    for k in range(1, len(s) - 1):  # debug
        assert abs(R[s[k], s[k] - 1]) <= 100 * eps  # debug

    for k in range(len(s) - 1):  # ranging over all blocks
        sk = s[k]
        if s[k + 1] - sk == 2:  # if the block is 2x2
            Q, R = normalize(Q, R, slice(sk, s[k + 1]), inplace=True)  # normalize it
            # store the eigenvalues (the one with the positive imaginary part is sufficient)
            p[k] = R[sk, sk] + np.lib.scimath.sqrt(R[sk + 1, sk] * R[sk, sk + 1])  # type: ignore[attr-defined]
        else:
            assert s[k + 1] - sk == 1  # debug
            p[k] = R[s[k], s[k]]  # if the block is 1x1, only store the eigenvalue

    ap = []

    for k in swaplist(p, s, z, b):  # for k ranging over all neighbor-swaps
        assert k + 2 < len(s)  # debug
        v = list(range(s[k], s[k + 1]))  # collect the coordinates of the blocks
        w = list(range(s[k + 1], s[k + 2]))
        assert v[0] != w[0]  # debug
        if len(v) == 2:
            assert v[0] < v[1]  # debug
        if len(w) == 2:
            assert w[0] < w[1]  # debug
        if (
            __debug__ and expensive_asserts
        ):  # debug: check that we are moving the larger eigenvalues to the left (expensive test)
            if v[0] < w[0]:  # debug
                arr = [p[k], p[k + 1]]  # debug
                _, which = select(arr, z)  # debug
                assert which == 1  # debug
            else:  # debug
                arr = [p[k + 1], p[k]]  # debug
                _, which = select(arr, z)  # debug
                assert which == 1  # debug
        vw = v + w
        nrA = np.linalg.norm(R[vw, :][:, vw], ord=np.inf)  # compute norm of the matrix A from eq. (6)
        Q, R = swap(Q, R, v, w, inplace=True)  # swap the blocks
        p[k], p[k + 1] = p[k + 1], p[k]  # debug
        s[k + 1] = s[k] + s[k + 2] - s[k + 1]  # update positions of blocks
        v = list(range(s[k], s[k + 1]))  # update block-coordinates
        w = list(range(s[k + 1], s[k + 2]))
        if len(v) == 2:  # if the first block is 2 x 2
            Q, R = normalize(Q, R, v, inplace=True)  # normalize it
        if len(w) == 2:  # if the second block is 2 x 2
            Q, R = normalize(Q, R, w, inplace=True)  # normalize it
        ap.append(
            float(np.linalg.norm(R[w, :][:, v], ord=np.inf) / (10 * eps * nrA))
        )  # measure size of bottom-left block (see p.6, Sect. 2.3)
    R = R - np.tril(R, -2)  # Zero the below-block entries
    for k in range(1, len(s) - 1):  # to get a quasi-triangle again
        R[s[k], s[k] - 1] = 0

    return Q, R, ap


# Based on the original MATLAB code below:
# -------------------------------------------------------------------------
# r = find(abs(diag(R,-1)) > 100*eps);
# s = 1:size(R,1)+1;
# s(r+1) = [];
#
# for k=1:length(s)-1;
#     sk = s(k);
#     if s(k+1)-sk == 2
#         [Q,R] = normalize(Q,R,sk:s(k+1)-1);
#         p(k) = R(sk,sk)+sqrt(R(sk+1,sk)*R(sk,sk+1));
#     else
#         p(k) = R(s(k),s(k));
#     end
# end
#
# for k = swaplist(p,s,z,b);
#     v = s(k):s(k+1)-1;
#     w = s(k+1):s(k+2)-1;
#     nrA = norm(R([v,w],[v,w]),inf);
#     [Q,R] = swap(Q,R,v,w);
#     s(k+1) = s(k)+s(k+2)-s(k+1);
#     v = s(k):s(k+1)-1;
#     w = s(k+1):s(k+2)-1;
#     if length(v)==2
#         [Q,R] = normalize(Q,R,v);
#     end
#     if length(w)==2
#         [Q,R] = normalize(Q,R,w);
#     end
#     ap(k) = norm(R(w,v),inf)/(10*eps*nrA);
# end
#
# R = R - tril(R,-2);
# for j=2:length(s)-1; R(s(j),s(j)-1)=0; end
# -------------------------------------------------------------------------


def normalize(
    U: ArrayLike, S: ArrayLike, v: Union[slice, List[int]], inplace: bool = False
) -> Tuple[ArrayLike, ArrayLike]:
    """
    Apply a Givens rotation such that the two-by-two diagonal block of `S` situated at diagonal positions \
    ``v[0]``, ``v[1]`` is in standardized form.

    I.e., the diagonal entries are equal, and the off-diagonal elements are of opposite sign.

    Parameters
    ----------
    U
        Orthogonal real matrix.
    S
        Quasi-triangular real matrix.
    v
        List of diagonal positions of the considered block of `S`.
    inplace
        Determines if the supplied `U` and `S` matrices are used in place (``inplace = True``)
        or if copies are made and manipulated (``inplace = False``; default).

    Returns
    -------
    Tuple of the following:

    - U : Orthogonal real matrix.
    - S : Quasi-triangular real matrix with the two-by-two diagonal block of `S` situated at
      diagonal positions ``v[0]``, ``v[1]`` in standardized form.
    """
    Q = rot(S[v, :][:, v])  # Determine the Givens rotation needed for standardization -
    if not inplace:
        S = S.copy()
        U = U.copy()
    S[:, v] = np.dot(S[:, v], Q)  # and apply it left and right to S, and right to U.
    S[v, :] = np.dot(Q.T, S[v, :])  # Only rows and columns with indices in the vector v can be affected by this.
    U[:, v] = np.dot(U[:, v], Q)
    return U, S


# Based on the original MATLAB code below:
# -------------------------------------------------------------------------
# function [U,S] = normalize(U,S,v);
# n = size(S,1);
# Q = rot(S(v,v));
# S(:,v) = S(:,v)*Q;
# S(v,:) = Q'*S(v,:);
# U(:,v) = U(:,v)*Q;
# -------------------------------------------------------------------------


def rot(X: ArrayLike) -> ArrayLike:
    r"""
    Compute the Givens rotation needed in :func:`normalize`.

    Parameters
    ----------
    X
        Two-by-two block of the quasi-triangular real Schur matrix.

    Returns
    -------
    Two-by-two Givens rotation matrix that brings the block into standardized form.
    """
    c = 1.0  # Start with the identity transformation, and if needed, change it into ...
    s = 0.0
    if X[0, 0] != X[1, 1]:
        tau = (X[0, 1] + X[1, 0]) / (X[0, 0] - X[1, 1])
        off = (tau**2 + 1) ** 0.5
        v = [tau - off, tau + off]
        w = int(np.argmin(np.abs(v)))
        c = 1.0 / (1.0 + v[w] ** 2) ** 0.5  # ... the cosine and sine as given in Section 2.3.1
        s = v[w] * c
    return np.array([[c, -s], [s, c]], dtype=X.dtype)


# Based on the original MATLAB code below:
# -------------------------------------------------------------------------
# function Q = rot(X);
# c = 1; s = 0;
# if X(1,1)~=X(2,2);
#     tau = (X(1,2)+X(2,1))/(X(1,1)-X(2,2));
#     off = sqrt(tau^2+1);
#     v = [tau - off, tau + off];
#     [d,w] = min(abs(v));
#     c = 1/sqrt(1+v(w)^2);
#     s = v(w)*c;
# end
# Q = [c -s;s c];
# -------------------------------------------------------------------------


@d.dedent
def swaplist(p: Union[ArrayLike, List[float]], s: List[int], z: Literal["LM", "LR"], b: float) -> List[int]:
    """
    Produce a list `v` of swaps of neighboring blocks needed to order the eigenvalues assembled in the vector `p` \
    from closest to `z` to farthest away from `z`, taking into account the parameter `b`.

    To do so, Python's :func:`sorted`, producing a stable sort, is used to realize the objective ordering
    of the diagonal blocks. This objective ordering can easily be defined, since all eigenvalues can be
    extracted from the given real Schur form. This, in turn, results in an objective permutation of the
    given ordering, which can be realized by `n` swaps of neighboring pairs, to be represented by a swaplist `v`.

    Parameters
    ----------
    p
        List of eigenvalues (only one copy for each complex-conjugate pair).
    s
        List of the top-left positions of each block.
    %(z)s
    b
        Determines the length of the ordering with respect to `z`.
        Valid options are:

            - ``b < 0``: ``-b`` blocks will be sorted.
            - ``b > 0``: ``b`` or ``b + 1`` eigenvalues will be sorted, depending on the sizes of the blocks.
            - ``b = 0``: the whole Schur form will be sorted.

    Returns
    -------
    Swaplist `v`, where ``v[j] = k`` means that in the `j`-th swap, the `k`-th and `k+1`-th block should be swapped.
    """
    p_orig = p  # debug
    n = len(p)
    p = list(p)
    k = 0
    v: List[int] = []
    srtd = 0  # Number of sorted eigenvalues.
    q = list(np.diff(s))  # Compute block sizes.
    q_orig = list(q)  # debug
    fini = False
    while not fini:
        _, j = select(p[k:n], z)  # Determine which block will go to position k
        p_j = p[k + j]  # debug
        p[k : n + 1] = [p[j + k]] + p[k:n]  # insert this block at position k,
        assert p[k] == p_j  # debug
        del p[j + k + 1]  # and remove it from where it was taken.
        if expensive_asserts and __debug__:
            assert np.all(sorted(p) == sorted(p_orig))  # debug
        q_j = q[k + j]  # debug
        q[k : n + 1] = [q[j + k]] + q[k:n]  # Similar for the block-sizes
        assert q[k] == q_j  # debug
        del q[j + k + 1]
        if expensive_asserts and __debug__:
            assert np.all(sorted(q) == sorted(q_orig))  # debug
        v = v + list(range(k, j + k))[::-1]  # Update the list of swaps for this block
        srtd = srtd + q[k]  # Update the number of sorted eigenvalues
        k += 1
        fini = k >= n - 1 or k == -b or srtd == b or (srtd == b + 1 and b != 0)
    return v


# Based on the original MATLAB code below:
# -------------------------------------------------------------------------
# function v = swaplist(p,s,z,b);
# n = length(p);
# k = 0; v = [];
# srtd = 0;
# q = diff(s);
# fini = 0;
# while ~fini
#     k = k+1;
#     [dum,j] = select(p(k:n),z);
#     p(k:n+1) = [p(j+k-1) p(k:n)];
#     p(j+k) = [];
#     q(k:n+1) = [q(j+k-1) q(k:n)];
#     q(j+k) = [];
#     v = [v,j+k-2:-1:k];
#     srtd = srtd + q(k);
#     fini = (k==n-1)|(k==-b)|(srtd==b)|((srtd==b+1)&(b~=0));
# end
# -------------------------------------------------------------------------


@d.dedent
def select(p: ArrayLike, z: Literal["LM", "LR"]) -> Tuple[float, int]:
    """
    Determine which block is next in the ordering (needed in :func:`swaplist`).
    Parameters
    ----------
    p
        List of eigenvalues.
    %(z)s

    Returns
    -------
    Block that is next in the ordering.
    """
    if z == "LM":
        pos = int(np.argmax(np.abs(p)))
        return np.abs(p[pos]), pos
    elif z == "LR":
        pos = int(np.argmax(np.real(p)))
        return np.real(p[pos]), pos
    else:
        raise NotImplementedError(z)
    # possible further sorting criteria, if needed...
    # y = np.real(z) + np.abs(np.imag(z)) * 1j  # Move target to the upper half plane.
    # delta = np.abs(np.array(p) - y)
    # pos = np.argmin(delta)  # Find block closest to the target.
    # return delta[pos], pos


# Based on the original MATLAB code below:
# -------------------------------------------------------------------------
# function [val,pos] = select(p,z);
# y = real(z)+abs(imag(z))*i;
# [val pos] = min(abs(p-y));
# -------------------------------------------------------------------------


def swap(U: ArrayLike, S: ArrayLike, v: List[int], w: List[int], inplace: bool = False) -> Tuple[ArrayLike, ArrayLike]:
    """
    Swap the two blocks on the diagonal of `S` at positions symbolized by the entries of `v` and `w`.

    Parameters
    ----------
    U
        Orthogonal real matrix.
    S
        Quasi-triangular real matrix.
    v
        List of integers (either one integer, if one has a one-by-one block,
        or two integers, if one has a two-by-two block) indicating the block
        to swap against the block indicated by `w`.
    w
        List of integers (either one integer, if one has a one-by-one block,
        or two integers, if one has a two-by-two block) indicating the block
        to swap against the block indicated by `v`.
    inplace
        Determines if the supplied `U` and `S` matrices are used in place (``inplace = True``)
        or if copies are made and manipulated (``inplace = False``; default).

    Returns
    -------
    Tuple of the following:

    - U : Orthogonal real matrix.
    - S : Quasi-triangular real matrix with the two blocks on the diagonal of `S`,
      at positions symbolized by the entries of `v` and `w`, swapped.
    """
    p, q = S[v, :][:, w].shape  # p and q are block sizes
    Ip = np.eye(p)
    Iq = np.eye(q)
    r = np.concatenate(
        [S[v, w[j]] for j in range(q)]
    )  # Vectorize right-hand side for Kronecker product formulation of the Sylvester equations (7).
    K = np.kron(Iq, S[v, :][:, v]) - np.kron(S[w, :][:, w].T, Ip)  # Kronecker product system matrix.
    L, H, P, Q = lu_complpiv(K, inplace=True)  # LU-decomposition of this matrix.
    e = np.min(np.abs(np.diag(H)))  # Scaling factor to prevent overflow.
    sigp = np.arange(p * q)
    for k in range(p * q - 1):  # Implement permutation P of the LU-decomposition PAQ=LU ...
        sigp[[k, P[k]]] = sigp[[P[k], k]].copy()
    r = e * r[sigp]  # ... scale and permute the right-hand side.
    try:
        x = np.linalg.solve(H, np.linalg.solve(L, r))  # and solve the two triangular systems.
    except np.linalg.LinAlgError as err:
        raise RuntimeError(f"Condition number of H is {np.linalg.cond(H)}.") from err
    sigq = np.arange(p * q)
    for k in range(p * q - 1):  # Implement permutation Q of the LU-decomposition PAQ=LU ...
        sigq[[k, Q[k]]] = sigq[[Q[k], k]].copy()
    x[sigq] = x.copy()  # ... and permute the solution.
    X = np.vstack(
        [x[j * p : (j + 1) * p] for j in range(q)]
    ).T  # De-vectorize the solution back to a block, i.e. leave the Kronecker formulation.
    Q, R = np.linalg.qr(np.vstack((-X, e * Iq)), mode="complete")  # Householder QR-decomposition of X.
    vw = list(v) + list(w)
    if not inplace:
        S = S.copy()
        U = U.copy()
    S[:, vw] = np.dot(S[:, vw], Q)  # Perform the actual swap by left- and right-multiplication of S by Q,
    S[vw, :] = np.dot(Q.T, S[vw, :])
    U[:, vw] = np.dot(U[:, vw], Q)  # and right-multiplication of U by Q
    return U, S


# Based on the original MATLAB code below:
# -------------------------------------------------------------------------
# function [U,S] = swap(U,S,v,w);
# [p,q] = size(S(v,w)); Ip = eye(p); Iq = eye(q);
# r = [];
# for j=1:q
#     r = [r;S(v,w(j))];
# end
# K = kron(Iq,S(v,v))-kron(S(w,w)',Ip);
# [L,H,P,Q] = lu_complpiv(K);
# e = min(abs(diag(H)));
# sigp = 1:p*q;
# for k = 1:p*q-1;
#     sigp([k,P(k)]) = sigp([P(k),k]);
# end
# r = e*r(sigp);
# x = (H\(L\r));
# sigq = 1:p*q;
# for k = 1:p*q-1;
#     sigq([k,Q(k)]) = sigq([Q(k),k]);
# end
# x(sigq) = x;
# X = [];
# for j=1:q
#     X = [X,x((j-1)*p+1:j*p)];
# end
# [Q,R] = qr([-X;e*Iq]);
# S(:,[v,w]) = S(:,[v,w])*Q;
# S([v,w],:) = Q'*S([v,w],:);
# U(:,[v,w]) = U(:,[v,w])*Q;
# -------------------------------------------------------------------------


def lu_complpiv(A: ArrayLike, inplace: bool = False) -> Tuple[ArrayLike, ArrayLike, ArrayLike, ArrayLike]:
    r"""
    Compute the LU-decomposition of a matrix `A` with complete pivoting.

    I.e., :math:`PAQ = LU` with permutations `P`, `Q` symbolized by vectors.

    Parameters
    ----------
    A
        Square matrix.
    inplace
        Determines if the supplied `A` matrix is used in place (``inplace = True``)
        or if a copy is made and used (``inplace = False``; default).

    Returns
    -------
    Tuple of the following:

    - L : Lower triangular matrix.
    - U : Upper triangular matrix.
    - P : Permutation matrix, which, when left-multiplied to `A`, reorders the rows of `A`.
    - Q : Permutation matrix, which, when right-multiplied to `A`, reorders the columns of `A`.
    """
    if not inplace or (__debug__ and expensive_asserts):
        A_inp = A  # debug
        A = A.copy()
    n = A.shape[0]
    P = np.zeros(n - 1, dtype=int)
    Q = np.zeros(n - 1, dtype=int)
    for k in range(
        n - 1
    ):  # See Golub and Van Loan, p. 118 for comments on this LU-decomposition with complete pivoting.
        Ak = A[k:n, :][:, k:n]
        rw, cl = np.unravel_index(np.argmax(np.abs(Ak), axis=None), Ak.shape)
        rw += k
        cl += k
        A[[k, rw], :] = A[[rw, k], :].copy()  # type: ignore[index]
        A[:, [k, cl]] = A[:, [cl, k]].copy()  # type: ignore[index]
        P[k] = rw
        Q[k] = cl
        if A[k, k] != 0:
            rs = slice(k + 1, n)
            A[rs, k] = A[rs, k] / A[k, k]
            A[rs, :][:, rs] = A[rs, :][:, rs] - A[rs, k][:, np.newaxis] * A[k, rs]
    U = np.tril(A.T).T
    L = np.tril(A, -1) + np.eye(n)
    if __debug__ and expensive_asserts:
        perm_p = np.arange(n)  # debug
        for k in range(n - 1):  # debug
            perm_p[[k, P[k]]] = perm_p[[P[k], k]].copy()  # debug
        perm_q = np.arange(n)  # debug
        for k in range(n - 1):  # debug
            perm_q[[k, Q[k]]] = perm_q[[Q[k], k]].copy()  # debug
        assert np.allclose(A_inp[perm_p, :][:, perm_q], np.dot(L, U))  # debug
    return L, U, P, Q


# Based on the original MATLAB code below:
# -------------------------------------------------------------------------
# function [L,U,P,Q] = lu_complpiv(A);
# P = []; Q = []; n = size(A,1);
# for k=1:n-1;
#     [a,r] = max(abs(A(k:n,k:n)));
#     [dummy,c] = max(abs(a));
#     cl = c+k-1;
#     rw = r(c)+k-1;
#     A([k,rw],:) = A([rw,k],:);
#     A(:,[k,cl]) = A(:,[cl,k]);
#     P(k) = rw; Q(k) = cl;
#     if A(k,k) ~= 0;
#         rs = k+1:n;
#         A(rs,k) = A(rs,k)/A(k,k);
#         A(rs,rs) = A(rs,rs)-A(rs,k)*A(k,rs);
#     end
# end
# U = tril(A')'; L = tril(A,-1) + eye(n);
# -------------------------------------------------------------------------


if __name__ == "__main__":
    import scipy
    import scipy.linalg

    expensive_asserts = True
    for _ in range(100):
        n = np.random.randint(2, 50)
        A = np.random.randn(n, n)
        # Alternate between the two sorting criteria supported by `select`:
        # largest magnitude ("LM") and largest real part ("LR").
        z = "LM" if n % 10 == 0 else "LR"  # TODO: rewrite the whole test to cover complex R
        R, Q = scipy.linalg.schur(A, output="real")
        T, Z = scipy.linalg.rsf2csf(R, Q)
        ev_orig = np.diag(T)
        eps = np.finfo(R.dtype).eps
        assert np.allclose(np.dot(A, Q), np.dot(Q, R))
        r = np.count_nonzero(np.abs(np.diag(R, -1)) > 100 * eps)
        Q, R, ap = sort_real_schur(Q, R, z, 0, inplace=(n % 2 == 0))
        # TODO: move the assertion comments to messages
        assert np.allclose(np.dot(A, Q), np.dot(Q, R))  # check that still a decomposition of the original matrix
        # test that Q and R have the correct structure
        assert np.allclose(np.dot(Q, Q.T), np.eye(A.shape[0]))  # Q orthonormal
        assert np.all(np.tril(R, -2) == 0)  # R triangular
        assert r == np.count_nonzero(np.abs(np.diag(R, -1)) > 100 * eps)  # number of blocks in R is preserved
        # check that eigenvalues are sorted
        T, Z = scipy.linalg.rsf2csf(R, Q)
        ev = np.diag(T)
        assert np.allclose(np.sort(ev), np.sort(ev_orig))  # check that eigenvalues were preserved
        # for "LM" the magnitudes must decrease along the diagonal, for "LR" the real parts
        delta = -np.abs(ev) if z == "LM" else -np.real(ev)
        assert np.all(delta[0:-1] <= delta[1:] + 100 * eps), (np.max(delta[0:-1] - delta[1:]), delta)
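# --- Editor's addition: a small, deterministic usage sketch, not part of the
# original file. It assumes only numpy and scipy (which the test block above
# already uses) and shows how `sort_real_schur` reorders the eigenvalues of a
# fixed matrix by largest real part ("LR").
if __name__ == "__main__":
    import scipy.linalg

    A = np.array(
        [
            [1.0, 2.0, 0.0, 0.0],
            [0.0, -3.0, 1.0, 0.0],
            [0.0, 0.0, 2.0, 1.0],
            [0.0, 0.0, 0.0, 0.5],
        ]
    )
    R, Q = scipy.linalg.schur(A, output="real")
    Q, R, ap = sort_real_schur(Q, R, z="LR", b=0)
    # The diagonal of R now carries the eigenvalues sorted by decreasing real part.
    print(np.diag(R))  # expected order (up to round-off): 2.0, 1.0, 0.5, -3.0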
23,920
37.334936
119
py
pyGPCCA
pyGPCCA-main/pygpcca/_types.py
from typing import Any, Optional import numpy as np __all__ = ["ArrayLike"] try: from numpy.typing import NDArray ArrayLike = NDArray[Any] except (ImportError, TypeError): ArrayLike = np.ndarray # type: ignore[misc] OArray = Optional[ArrayLike]
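# --- Editor's addition: a hedged usage sketch, not part of the original file.
# It shows how the `ArrayLike` alias defined above is typically used to
# annotate functions regardless of whether `numpy.typing.NDArray` is available.
def _normalize_rows_example(x: ArrayLike) -> ArrayLike:
    """Row-normalize a 2D array (illustrative helper, not part of the package)."""
    arr = np.asarray(x, dtype=np.float64)
    return arr / arr.sum(axis=1, keepdims=True)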
264
15.5625
48
py
pyGPCCA
pyGPCCA-main/pygpcca/__init__.py
from pygpcca.utils import stationary_distribution # type: ignore[attr-defined] from pygpcca._gpcca import GPCCA, gpcca_coarsegrain __author__ = __maintainer__ = "Bernhard Reuter" __version__ = "1.0.4" __email__ = "bernhard-reuter@gmx.de"
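# --- Editor's addition: a hedged usage note, not part of the original file.
# The public API re-exported above is typically imported as:
#
#   from pygpcca import GPCCA, gpcca_coarsegrain, stationary_distribution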
240
33.428571
79
py
pyGPCCA
pyGPCCA-main/pygpcca/_gpcca.py
# This file is part of pyGPCCA.
#
# Copyright (c) 2020 Bernhard Reuter.
# With contributions of Marius Lange and Michal Klein.
# Based on the original MATLAB GPCCA code authored by Bernhard Reuter, Susanna Roeblitz and Marcus Weber,
# Zuse Institute Berlin, Takustrasse 7, 14195 Berlin
# ---------------------------------------------------------------------------------------------------------------------
# The development of pyGPCCA started at the beginning of 2020 in a fork of MSMTools
# (Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER);
# provided under LGPL-3.0 License), since at this time it was planned to integrate GPCCA into it.
# Due to this, some similarities in structure/AST and code (indicated where evident) between pcca.py
# https://github.com/markovmodel/msmtools/blob/93126608c6fa9c3197f4fae2f6da93140762b047/msmtools/analysis/dense/pcca.py
# and _gpcca.py can be found.
# ---------------------------------------------------------------------------------------------------------------------
# If you use this code or parts of it, cite the following reference:
# ---------------------------------------------------------------------------------------------------------------------
# Bernhard Reuter, Konstantin Fackeldey, and Marcus Weber,
# Generalized Markov modeling of nonreversible molecular kinetics,
# The Journal of Chemical Physics, 150(17):174103, 2019.
# https://doi.org/10.1063/1.5064530
# ---------------------------------------------------------------------------------------------------------------------
# pyGPCCA is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with this program.
# If not, see <http://www.gnu.org/licenses/>.
# ---------------------------------------------------------------------------------------------------------------------

__author__ = __maintainer__ = "Bernhard Reuter"
__email__ = "bernhard-reuter@gmx.de"
__copyright__ = "Copyright 2020, Bernhard Reuter"
__credits__ = [
    "Bernhard Reuter",
    "Marcus Weber",
    "Susanna Roeblitz",
    "Marius Lange",
    "Michal Klein",
    "Fabian Paul",
    "Alexander Sikorski",
]

from typing import Dict, List, Tuple, Union, Literal, Callable, Optional, TYPE_CHECKING

try:
    from functools import cached_property  # type: ignore[attr-defined]
except ImportError:
    from functools import lru_cache

    def cached_property(fn: Callable) -> property:  # type: ignore[no-redef,type-arg]
        """Cached property backport."""  # noqa: D401
        # mypy complains about overriding the same name
        return property(lru_cache(maxsize=1)(fn))


import sys
import logging

if not sys.warnoptions:
    import os
    import warnings

    warnings.simplefilter("always", category=UserWarning)  # Change the filter in this process
    os.environ["PYTHONWARNINGS"] = "always::UserWarning"  # Also affect subprocesses

from scipy.linalg import subspace_angles
from scipy.sparse import issparse, spmatrix
from scipy.optimize import fmin

import numpy as np
import scipy.sparse as sp

from pygpcca._types import OArray, ArrayLike
from pygpcca.utils._docs import d
from pygpcca.utils._utils import (
    connected_sets,
    is_transition_matrix,
    stationary_distribution,
)
from pygpcca._sorted_schur import sorted_schur, _check_conj_split
from pygpcca.utils._constants import EPS, DEFAULT_SCHUR_METHOD

__all__ = ["gpcca_coarsegrain", "GPCCA"]


@d.dedent
def _gram_schmidt_mod(X: ArrayLike, eta: ArrayLike) -> ArrayLike:
    r"""
    :math:`\eta`-orthonormalize Schur vectors.

    This uses a modified, numerically stable version of Gram-Schmidt Orthonormalization.

    Parameters
    ----------
    X
        Array of shape `(n, m)` consisting columnwise of the `m` dominant Schur vectors of
        :math:`\tilde{P} = \mathtt{diag}(\sqrt{\eta}) P \mathtt{diag}(1.0 / \sqrt{\eta})`.
    %(eta)s

    Returns
    -------
    Array of shape `(n, m)` with the orthonormalized `m` dominant Schur vectors of :math:`\tilde{P}` in columns.
    The elements of the first column are constantly equal :math:`\sqrt{\eta}`.
    """
    # Keep copy of the original (Schur) vectors for later sanity check.
    Xc = np.copy(X)

    # Initialize matrices.
    n, m = X.shape
    Q = np.zeros((n, m))
    R = np.zeros((m, m))

    # Search for the constant (Schur) vector, if explicitly present.
    max_i = 0
    for i in range(m):
        vsum = np.sum(X[:, i])
        dummy = np.ones(X[:, i].shape) * (vsum / n)
        if np.allclose(X[:, i], dummy, rtol=1e-6, atol=1e-5):
            max_i = i  # TODO: check, if more than one vec fulfills this

    # Shift non-constant first (Schur) vector to the right.
    X[:, max_i] = X[:, 0]
    # Set first (Schur) vector equal sqrt(eta) (In _do_schur() the Q-matrix, orthogonalized by
    # _gram_schmidt_mod(), will be multiplied with 1.0./sqrt(eta) - so the first (Schur) vector will
    # become the unit vector 1!).
    X[:, 0] = np.sqrt(eta)
    # Raise, if the subspace changed!
    dummy = subspace_angles(X, Xc)
    if not np.allclose(dummy, 0.0, atol=1e-7, rtol=1e-5):
        logging.error(Xc)
        logging.error(X)
        raise ValueError(
            "The subspace of Q derived by shifting a non-constant first (Schur)vector "
            "to the right and setting the first (Schur) vector equal sqrt(eta) doesn't "
            f"match the subspace of the original Q! The subspace angles are: {dummy}. "
            f"Number of clusters: {m}."
        )

    # eta-orthonormalization
    for j in range(m):
        v = X[:, j]
        for i in range(j):
            R[i, j] = np.dot(Q[:, i].conj(), v)
            v = v - np.dot(R[i, j], Q[:, i])
        R[j, j] = np.linalg.norm(v)
        Q[:, j] = np.true_divide(v, R[j, j])

    # Raise, if the subspace changed!
    dummy = subspace_angles(Q, Xc)
    if not np.allclose(dummy, 0.0, atol=1e-7, rtol=1e-5):
        raise ValueError(
            "The subspace of Q derived by eta-orthogonalization doesn't match the "
            f"subspace of the original Q! The subspace angles are: {dummy}. "
            f"Number of clusters: {m}."
        )
    # Raise, if the (Schur) vectors aren't orthogonal!
    if not np.allclose(Q.conj().T.dot(Q), np.eye(Q.shape[1]), atol=1e-8, rtol=1e-5):
        dev = np.max(np.abs(Q.conj().T.dot(Q) - np.eye(Q.shape[1])))
        raise ValueError(
            f"(Schur) vectors do not appear to be orthogonal. Largest absolute element-wise deviation in "
            f"(Q^*TQ - I) is {dev}"
        )
    return Q


@d.dedent
def _do_schur(
    P: Union[ArrayLike, spmatrix],
    eta: ArrayLike,
    m: int,
    z: Literal["LM", "LR"] = "LM",
    method: str = DEFAULT_SCHUR_METHOD,
    tol_krylov: float = 1e-16,
) -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
    r"""
    Firstly, a Schur decomposition of the `(n, n)` transition matrix `P` is performed,
    with due regard to the input distribution of states `eta`.

    In theory `eta` can be an arbitrary distribution as long as it is a valid probability distribution
    (i.e., sums up to 1). A neutral and valid choice would be the uniform distribution (default).

    In case of a reversible transition matrix, the stationary distribution :math:`\pi` can
    (but does not have to) be used here.
    In case of a non-reversible `P`, some initial or average distribution of the states
    might be chosen instead of the uniform distribution.

    Afterwards the Schur form and Schur vector matrix are sorted by sorting the `m` dominant
    (default: with the largest magnitude) eigenvalues to the top left of the Schur form in
    descending order and correspondingly sorting the associated Schur vectors to the left
    of the Schur vector matrix.

    Finally, only the top left `(m, m)` part of the sorted Schur form and the associated left
    `(n, m)` part of the correspondingly sorted Schur vector matrix are returned.

    Parameters
    ----------
    %(P)s
    %(eta)s
    %(m)s
        These correspond to the `m` dominant (default: with the largest magnitude) eigenvalues.
    %(z)s
    %(method)s
    %(tol_krylov)s

    Returns
    -------
    Triple of the following:

    X
        %(Q_sort)s
    R
        %(R_sort)s
    eigenvalues
        %(eigenvalues_m)s
    """  # noqa: D205, D400
    # Exceptions
    N1 = P.shape[0]
    N2 = P.shape[1]
    if m < 0:
        raise ValueError("The number of clusters/states is not supposed to be negative.")
    if N1 != N2:
        raise ValueError("P matrix isn't quadratic.")
    if eta.shape[0] != N1:
        raise ValueError("eta vector length doesn't match with the shape of P.")
    if not np.allclose(np.sum(P, 1), 1.0, rtol=1e-6, atol=1e-6):  # previously eps
        dev = np.max(np.abs(np.sum(P, 1) - 1.0))
        raise ValueError(
            f"Not all rows of P sum up to one. P must be a row-stochastic matrix. The largest deviation "
            f"of a row-sum from 1 is {dev}."
        )
    if not np.all(eta > EPS):
        smallest_eta = np.min(eta)
        raise ValueError(f"Not all elements of eta are > 0. The smallest element is {smallest_eta}.")

    # Weight the stochastic matrix P by the input (initial) distribution eta.
    if issparse(P):
        A = sp.dia_matrix(([np.sqrt(eta)], [0]), shape=P.shape)
        B = sp.dia_matrix(([1.0 / np.sqrt(eta)], [0]), shape=P.shape)
        P_bar = A.dot(P).dot(B)
    else:
        P_bar = np.diag(np.sqrt(eta)).dot(P).dot(np.diag(1.0 / np.sqrt(eta)))

    # Make a Schur decomposition of P_bar and sort the Schur vectors (and form).
    R, Q, eigenvalues = sorted_schur(P_bar, m, z, method, tol_krylov=tol_krylov)  # Pbar!!!

    # Orthonormalize the sorted Schur vectors Q via modified Gram-Schmidt-orthonormalization,
    # if the (Schur) vectors aren't orthogonal!
    if not np.allclose(Q.T.dot(Q * eta[:, None]), np.eye(Q.shape[1]), rtol=1e6 * EPS, atol=1e6 * EPS):
        logging.debug("The Schur vectors aren't D-orthogonal so they are D-orthogonalized.")
        Q = _gram_schmidt_mod(Q, eta)
        # Transform the orthonormalized Schur vectors of P_bar back
        # to orthonormalized Schur vectors X of P.
        X = np.true_divide(Q, np.sqrt(eta)[:, None])
    else:
        # Search for the constant (Schur) vector, if explicitly present.
        n, m = Q.shape
        max_i = 0
        for i in range(m):
            vsum = np.sum(Q[:, i])
            dummy = np.ones(Q[:, i].shape) * (vsum / n)
            if np.allclose(Q[:, i], dummy, rtol=1e-6, atol=1e-5):
                max_i = i  # TODO: check, if more than one vec fulfills this

        # Shift non-constant first (Schur) vector to the right.
        Q[:, max_i] = Q[:, 0]
        # Transform the orthonormalized Schur vectors of P_bar back
        # to orthonormalized Schur vectors X of P.
        X = np.true_divide(Q, np.sqrt(eta)[:, None])
        # Set first (Schur) vector equal 1.
        X[:, 0] = 1.0

    if not X.shape[0] == N1:
        raise ValueError(
            f"The number of rows `n={X.shape[0]}` of the Schur vector matrix X doesn't match "
            f"those `n={P.shape[0]}` of P."
        )
    # Raise, if the first column X[:,0] of the Schur vector matrix isn't constantly equal 1!
    if not np.allclose(X[:, 0], 1.0, atol=1e-8, rtol=1e-5):
        dev = np.max(np.abs(X[:, 0] - 1.0))
        raise ValueError(
            f"The first column X[:, 0] of the Schur vector matrix isn't constantly equal 1. The largest "
            f"deviation from one is {dev}."
        )

    # Raise, if the (Schur) vectors aren't D-orthogonal (don't fulfill the orthogonality condition)!
    if not np.allclose(X.T.dot(X * eta[:, None]), np.eye(X.shape[1]), atol=1e-6, rtol=1e-5):
        dev = np.max(np.abs(X.T.dot(X * eta[:, None]) - np.eye(X.shape[1])))
        raise ValueError(
            f"Schur vectors appear not to be D-orthogonal. The largest deviation of X^T D X from the "
            f"identity matrix is {dev}."
        )

    # Raise, if X doesn't fulfill the invariant subspace condition!
    dp = np.dot(P, sp.csr_matrix(X) if issparse(P) else X)
    dummy = subspace_angles(dp.toarray() if issparse(dp) else dp, np.dot(X, R))

    test = np.allclose(dummy, 0.0, atol=1e-6, rtol=1e-5)
    test1 = dummy.shape[0] == m
    if not test:
        raise ValueError(
            f"According to `scipy.linalg.subspace_angles()`, X isn't an invariant "
            f"subspace of P, since the subspace angles between the column spaces "
            f"of P*X and X*R aren't near zero. The subspace angles are: `{dummy}`."
        )
    if not test1:
        warnings.warn(
            "According to `scipy.linalg.subspace_angles()` the dimension of the "
            f"column spaces of P*X and/or X*R is not equal to {m}.",
            stacklevel=2,
        )

    return X, R, eigenvalues


@d.dedent
def _initialize_rot_matrix(X: ArrayLike) -> ArrayLike:
    """
    Initialize the rotation matrix.

    Parameters
    ----------
    X
        %(Q_sort)s

    Returns
    -------
    Initial (non-optimized) rotation matrix of shape `(m, m)`.
    """
    # Search start simplex vertices ('inner simplex algorithm').
    index = _indexsearch(X)

    # Local copy of the Schur vectors.
    # Xc = np.copy(X)

    # Raise or warn if condition number is (too) high.
    condition = np.linalg.cond(X[index, :])
    if condition >= (1.0 / EPS):
        raise ValueError(
            f"The condition number {condition} of the matrix of start simplex vertices "
            "X[index, :] is too high for safe inversion (to build the initial rotation matrix)."
        )
    if condition > 1e4:
        warnings.warn(
            f"The condition number {condition} of the matrix of start simplex vertices "
            "X[index, :] is quite high for safe inversion (to build the initial rotation matrix).",
            stacklevel=2,
        )

    # Compute transformation matrix rot_matrix as initial guess for local optimization (maybe not feasible!).
    return np.linalg.pinv(X[index, :])


@d.dedent
def _indexsearch(X: ArrayLike) -> ArrayLike:
    """
    Find a simplex structure in the data.

    Parameters
    ----------
    X
        %(Q_sort)s

    Returns
    -------
    Vector of shape `(m,)` with indices of data points that constitute the vertices of a simplex.
    """
    n, m = X.shape

    # Sanity check.
    if n < m:
        raise ValueError(
            f"The Schur vector matrix of shape {X.shape} has more columns than rows. "
            f"You can't get a {m}-dimensional simplex from {n} data vectors."
        )
    # Check if the first, and only the first eigenvector is constant.
    diffs = np.abs(np.max(X, axis=0) - np.min(X, axis=0))
    if not np.isclose(1.0 + diffs[0], 1.0, rtol=1e-6):
        raise ValueError(
            f"First Schur vector is not constant 1. This indicates that the Schur vectors "
            f"are incorrectly sorted. Cannot search for a simplex structure in the data. The spread "
            f"(max - min) of the first Schur vector is {diffs[0]}."
        )
    if not np.all(diffs[1:] > 1e-6):
        which = np.sum(diffs[1:] <= 1e-6)
        raise ValueError(
            f"{which} Schur vector(s) after the first one are constant. Probably the Schur vectors "
            "are incorrectly sorted. Cannot search for a simplex structure in the data."
        )

    # local copy of the eigenvectors
    ortho_sys = np.copy(X)

    index = np.zeros(m, dtype=np.int64)
    max_dist = 0.0

    # First vertex: row with the largest norm.
    for i in range(n):
        dist = np.linalg.norm(ortho_sys[i, :])
        if dist > max_dist:
            max_dist = dist  # type: ignore[assignment]
            index[0] = i

    # Translate coordinates to make the first vertex the origin.
    ortho_sys -= np.ones((n, 1)).dot(ortho_sys[index[0], np.newaxis])
    # Would be shorter, but less readable: ortho_sys -= X[index[0], np.newaxis]

    # All further vertices as rows with maximum distance to existing subspace.
    for j in range(1, m):
        max_dist = 0.0
        temp = np.copy(ortho_sys[index[j - 1], :])
        for i in range(n):
            sclprod = ortho_sys[i, :].dot(temp)
            ortho_sys[i, :] -= sclprod * temp
            distt = np.linalg.norm(ortho_sys[i, :])
            if distt > max_dist:  # and i not in index[0:j]: #in _pcca_connected_isa() of pcca.py
                max_dist = distt  # type: ignore[assignment]
                index[j] = i
        ortho_sys /= max_dist

    return index


@d.dedent
def _objective(alpha: ArrayLike, X: ArrayLike) -> float:
    """
    Compute the objective function value.

    Parameters
    ----------
    alpha
        Vector of shape `((m - 1)^2,)` containing the flattened and cropped rotation matrix ``rot_matrix[1:, 1:]``.
    X
        %(Q_sort)s

    Returns
    -------
    Current value of the objective function :math:`f = m - trace(S)` (Eq. 16 from [Roeblitz13]_).
    """
    # Dimensions.
    n, m = X.shape
    k = m - 1

    # Initialize rotation matrix.
    rot_mat = np.zeros((m, m), dtype=np.float64)

    # Sanity checks.
    if alpha.shape[0] != k**2:
        raise ValueError(
            "The shape of alpha doesn't match with the shape of X: "
            f"It is not a ({k}^2,)-vector, but of dimension {alpha.shape}. X is of shape `{X.shape}`."
        )

    # Now reshape alpha into a (k,k)-matrix.
    rot_crop_matrix = np.reshape(alpha, (k, k))

    # Complete rot_mat to meet constraints (positivity, partition of unity).
    rot_mat[1:, 1:] = rot_crop_matrix
    rot_mat = _fill_matrix(rot_mat, X)

    # Compute value of the objective function.
    # from Matlab: optval = m - trace( diag(1 ./ A(1,:)) * (A' * A) )
    return m - np.trace(np.diag(1.0 / rot_mat[0, :]).dot(rot_mat.conj().T.dot(rot_mat)))  # type: ignore[return-value]


@d.dedent
def _opt_soft(X: ArrayLike, rot_matrix: ArrayLike) -> Tuple[ArrayLike, ArrayLike, float]:
    r"""
    Optimize the G-PCCA rotation matrix such that the memberships are exclusively non-negative
    and compute the membership matrix.

    Parameters
    ----------
    X
        %(Q_sort)s
    rot_matrix
        Initial (non-optimized) rotation matrix of shape `(m, m)`.

    Returns
    -------
    Triple of the following:

    rot_matrix
        %(rot_matrix_ret)s
    chi
        %(chi_ret)s
    fopt
        Optimal value of the objective function :math:`f_{opt} = m - \mathtt{trace}(S)`
        (Eq. 16 from [Roeblitz13]_).
    """  # noqa: D205, D400
    n, m = X.shape

    # Sanity checks.
    if not (rot_matrix.shape[0] == rot_matrix.shape[1]):
        raise ValueError("Rotation matrix isn't quadratic.")
    if not (rot_matrix.shape[0] == m):
        raise ValueError("The dimensions of the rotation matrix don't match with the number of Schur vectors.")
    if rot_matrix.shape[0] < 2:
        raise ValueError(f"Expected the rotation matrix to be at least of shape (2, 2), found {rot_matrix.shape}.")

    # Reduce optimization problem to size (m-1)^2 by cropping the first row and first column from rot_matrix
    rot_crop_matrix = rot_matrix[1:, 1:]

    # Now reshape rot_crop_matrix into a linear vector alpha.
    k = m - 1
    alpha = np.reshape(rot_crop_matrix, k**2)
    # TODO: Implement Gauss Newton Optimization to speed things up esp. for m > 10
    alpha, fopt, _, _, _ = fmin(_objective, alpha, args=(X,), full_output=True, disp=False)

    # Now reshape alpha into a (k,k)-matrix.
    rot_crop_matrix = np.reshape(alpha, (k, k))

    # Complete rot_mat to meet constraints (positivity, partition of unity).
    rot_matrix[1:, 1:] = rot_crop_matrix
    rot_matrix = _fill_matrix(rot_matrix, X)

    # Compute the membership matrix.
    chi = np.dot(X, rot_matrix)

    # Check for negative elements in chi and handle them.
    if np.any(chi < 0.0):
        if np.any(chi < -1e4 * EPS):
            min_el = np.min(chi)
            raise ValueError(
                f"Some elements of chi are significantly negative. The minimal element in chi is {min_el}."
            )
        else:
            chi[chi < 0.0] = 0.0
            chi = np.true_divide(1.0, np.sum(chi, axis=1))[:, np.newaxis] * chi
            if not np.allclose(np.sum(chi, axis=1), 1.0, atol=1e-8, rtol=1e-5):
                dev = np.max(np.abs(np.sum(chi, axis=1) - 1.0))
                raise ValueError(
                    f"The rows of chi don't sum up to 1.0 after rescaling. Maximum deviation from 1 is {dev}."
                )

    return rot_matrix, chi, fopt


@d.dedent
def _fill_matrix(rot_matrix: ArrayLike, X: ArrayLike) -> ArrayLike:
    """
    Make the rotation matrix feasible.

    Parameters
    ----------
    rot_matrix
        (Infeasible) rotation matrix of shape `(m, m)`.
    X
        %(Q_sort)s

    Returns
    -------
    Feasible rotation matrix of shape `(m, m)`.
    """
    n, m = X.shape

    # Sanity checks.
    if not (rot_matrix.shape[0] == rot_matrix.shape[1]):
        raise ValueError("Rotation matrix isn't quadratic.")
    if not (rot_matrix.shape[0] == m):
        raise ValueError("The dimensions of the rotation matrix don't match with the number of Schur vectors.")

    # Compute first column of rot_mat by row sum condition.
    rot_matrix[1:, 0] = -np.sum(rot_matrix[1:, 1:], axis=1)

    # Compute first row of A by maximum condition.
    dummy = -np.dot(X[:, 1:], rot_matrix[1:, :])
    rot_matrix[0, :] = np.max(dummy, axis=0)

    # Rescale rot_mat to be in the feasible set.
    rot_matrix = rot_matrix / np.sum(rot_matrix[0, :])

    # Make sure that there are no zero or negative elements in the first row of A.
    if np.any(rot_matrix[0, :] == 0):
        raise ValueError("First row of rotation matrix has elements = 0.")
    if np.min(rot_matrix[0, :]) < 0:
        raise ValueError("First row of rotation matrix has elements < 0.")

    return rot_matrix


@d.dedent
def _cluster_by_isa(X: ArrayLike) -> Tuple[ArrayLike, float]:
    """
    Classification of dynamical data based on `m` orthonormal Schur vectors of the (row-stochastic) transition matrix.

    Hereby `m` determines the number of clusters to cluster the data into.
    The applied method is the Inner Simplex Algorithm (ISA).
    Constraint: The Schur vector matrix `X` needs to contain at least `m` Schur vectors.
    This function assumes that the state space is fully connected.

    Parameters
    ----------
    X
        %(Q_sort)s

    Returns
    -------
    Tuple of the following:

    chi
        %(chi_ret)s
    minChi
        minChi indicator, see [Roeblitz13]_ and [Reuter18]_.
    """
    # compute rotation matrix
    rot_matrix = _initialize_rot_matrix(X)

    # Compute the membership matrix.
    chi = np.dot(X, rot_matrix)

    # compute the minChi indicator
    minChi = np.amin(chi)

    return chi, minChi


@d.dedent
def _gpcca_core(X: ArrayLike) -> Tuple[ArrayLike, ArrayLike, float]:
    r"""
    Core of the G-PCCA spectral clustering method with optimized memberships [Reuter18]_, [Reuter19]_.

    Clusters the dominant `m` Schur vectors of a transition matrix.
    This algorithm generates a fuzzy clustering such that the resulting membership functions
    are as crisp (characteristic) as possible.

    Parameters
    ----------
    X
        %(Q_sort)s

    Returns
    -------
    Triple of the following:

    chi
        %(chi_ret)s
    rot_matrix
        %(rot_matrix_ret)s
    crispness
        %(crispness_ret)s
    """
    m = np.shape(X)[1]

    rot_matrix = _initialize_rot_matrix(X)

    rot_matrix, chi, fopt = _opt_soft(X, rot_matrix)

    # calculate crispness of the decomposition of the state space into m clusters
    crispness = (m - fopt) / m

    return chi, rot_matrix, crispness


@d.dedent
def _coarsegrain(P: Union[ArrayLike, spmatrix], eta: ArrayLike, chi: ArrayLike) -> ArrayLike:
    r"""
    Coarse-grain `P` such that the (dominant) Perron eigenvalues are preserved.

    Uses:

    .. math:: P_c = (\chi^T D \chi)^{-1} (\chi^T D P \chi)

    with :math:`D` being a diagonal matrix with `eta` on its diagonal [Reuter18]_, [Reuter19]_.

    Parameters
    ----------
    %(P)s
    %(eta)s
    chi
        %(chi_ret)s

    Returns
    -------
    The coarse-grained row-stochastic transition matrix.
    """
    # Matlab: Pc = pinv(chi'*diag(eta)*chi)*(chi'*diag(eta)*P*chi)
    # need to make sure here that memory does not explode, and P is never densified
    W = np.linalg.pinv(chi.T.dot(chi * eta[:, None]))
    V = chi.T * eta
    if issparse(P):
        V = sp.csr_matrix(V)
    A = V.dot(P).dot(chi)

    return W.dot(A)


@d.dedent
def gpcca_coarsegrain(
    P: Union[ArrayLike, spmatrix],
    m: Union[int, Tuple[int, int], List[int], Dict[str, int]],
    eta: Optional[ArrayLike] = None,
    z: Literal["LM", "LR"] = "LM",
    method: str = DEFAULT_SCHUR_METHOD,
) -> ArrayLike:
    r"""
    Coarse-grain the transition matrix `P` into `m` sets using G-PCCA [Reuter18]_, [Reuter19]_.

    Performs optimized spectral clustering via G-PCCA and coarse-grains `P`
    such that the dominant Perron eigenvalues are preserved using:

    .. math:: P_c = (\chi^T D \chi)^{-1} (\chi^T D P \chi)

    with :math:`D` being a diagonal matrix with `eta` on its diagonal [Reuter18]_, [Reuter19]_.

    Parameters
    ----------
    %(P)s
    %(m_optimize)s
    %(eta)s

        If `None` (default), uniform distribution is used.
    %(z_P)s
    %(method)s

        See the `installation <https://pygpcca.readthedocs.io/en/latest/installation.html>`_
        instructions for more information.

    Returns
    -------
    The coarse-grained row-stochastic transition matrix.
    References
    ----------
    If you use this code or parts of it, please cite [Reuter19]_.
    """
    # Matlab: Pc = pinv(chi'*diag(eta)*chi)*(chi'*diag(eta)*P*chi)
    g = GPCCA(P, eta=eta, z=z, method=method).optimize(m)
    # Use the model's input distribution so that `eta=None` correctly falls back to the uniform default.
    return _coarsegrain(P, eta=g.input_distribution, chi=g.memberships)


@d.dedent
class GPCCA:
    """
    G-PCCA spectral clustering method with optimized memberships [Reuter18]_, [Reuter19]_.

    Clusters the dominant `m` Schur vectors of a transition matrix.
    This algorithm generates a fuzzy clustering such that the resulting membership functions
    are as crisp (characteristic) as possible.

    Parameters
    ----------
    %(P)s
    %(eta)s

        If `None`, uniform distribution is used.
    %(z_P)s
    %(method)s

        See the `installation <https://pygpcca.readthedocs.io/en/latest/installation.html>`_
        instructions for more information.

    References
    ----------
    If you use this code or parts of it, please cite [Reuter19]_.
    """

    def __init__(
        self,
        P: Union[ArrayLike, spmatrix],
        eta: Optional[ArrayLike] = None,
        z: Literal["LM", "LR"] = "LM",
        method: str = DEFAULT_SCHUR_METHOD,
    ):
        if not is_transition_matrix(P):
            raise ValueError("Input matrix P is not a transition matrix.")
        if z not in ["LM", "LR"]:
            raise ValueError("You didn't give a valid sorting criterion z. Valid options are: 'LM', 'LR'.")
        if method not in ["brandts", "krylov"]:
            raise ValueError(
                "You didn't give a valid method to determine the invariant subspace. "
                "Valid options are: 'brandts', 'krylov'."
            )

        n = np.shape(P)[0]
        if eta is None:
            eta = np.true_divide(np.ones(P.shape[0]), P.shape[0])
        if len(eta) != n:
            raise ValueError(f"eta vector length ({len(eta)}) doesn't match with the shape of " f"P[{n}, {n}].")

        self._P = P.astype(np.float64)
        self._eta: ArrayLike = eta.astype(np.float64)
        self._z: str = z
        self._method: str = method

        # _p stands for precomputed
        self._p_X: OArray = None
        self._p_R: OArray = None
        self._p_eigenvalues: OArray = None

        # these are the actual values, accessed by the properties
        self._X: OArray = None
        self._R: OArray = None
        self._eigenvalues: OArray = None
        self._top_eigenvalues: OArray = None

        self._m_opt: Optional[int] = None
        self._chi: OArray = None
        self._rot_matrix: OArray = None
        self._crispness_opt: Optional[float] = None
        self._crispness: OArray = None

        self._pi: OArray = None
        self._pi_coarse: OArray = None
        self._eta_coarse: OArray = None
        self._P_coarse: OArray = None

    def _do_schur_helper(self, m: int) -> None:
        n = np.shape(self._P)[0]
        if self._p_X is not None and self._p_R is not None and self._p_eigenvalues is not None:
            Xdim1, Xdim2 = self._p_X.shape
            Rdim1, Rdim2 = self._p_R.shape
            if Xdim1 != n:
                raise ValueError(
                    f"The first dimension of X is `{Xdim1}`. This doesn't match with the dimension of P[{n}, {n}]."
                )
            if Rdim1 != Rdim2:
                raise ValueError("The Schur form R is not quadratic.")
            if Xdim2 != Rdim1:
                raise ValueError(
                    f"The second dimension of X is `{Xdim2}`. "
                    f"This doesn't match with the dimension of R[{Rdim1}, {Rdim2}]."
                )
            if Rdim2 < m:
                self._p_X, self._p_R, self._p_eigenvalues = _do_schur(
                    self._P, eta=self._eta, m=m, z=self._z, method=self._method
                )
            else:
                # if we are using pre-computed decomposition, check splitting
                if m < n:
                    if len(self._p_eigenvalues) < m:
                        raise ValueError(
                            f"Can't check complex conjugate block splitting for {m} clusters with only "
                            f"{len(self._p_eigenvalues)} eigenvalues."
                        )
                    else:
                        if _check_conj_split(self._p_eigenvalues[:m]):
                            raise ValueError(
                                f"Clustering into {m} clusters will split complex conjugate eigenvalues. "
                                f"Request one cluster more or less."
                            )
                logging.info("Using pre-computed Schur decomposition")
        else:
            self._p_X, self._p_R, self._p_eigenvalues = _do_schur(
                self._P, eta=self._eta, m=m, z=self._z, method=self._method
            )

    def minChi(self, m_min: int, m_max: int) -> List[float]:
        r"""
        Calculate the minChi indicator (see [Reuter18]_) for every :math:`m \in [m_{min},m_{max}]`.

        The minChi indicator can be used to determine an interval :math:`I \subset [m_{min},m_{max}]` of
        good (potentially optimal) numbers of clusters.

        Afterwards either one :math:`m \in I` (with maximal `minChi`) or the whole interval :math:`I`
        is chosen as input to :meth:`optimize` for further optimization.

        Parameters
        ----------
        m_min
            Minimal number of clusters to group into.
        m_max
            Maximal number of clusters to group into.

        Returns
        -------
        List of minChi indicators for cluster numbers :math:`m \in [m_{min},m_{max}]`,
        see [Roeblitz13]_, [Reuter18]_.
        """
        # Validate input.
        if m_min >= m_max:
            raise ValueError(f"m_min ({m_min}) must be smaller than m_max ({m_max}).")
        if m_min in [0, 1]:
            raise ValueError(f"There is no point in clustering into `{m_min}` clusters.")

        # Calculate Schur matrix R and Schur vector matrix X, if not adequately given.
        self._do_schur_helper(m_max)
        if TYPE_CHECKING:
            assert isinstance(self._p_X, np.ndarray)

        minChi_list: List[float] = []
        for m in range(m_min, m_max + 1):
            # Xm = np.copy(X[:, :m])
            _, minChi = _cluster_by_isa(self._p_X[:, :m])
            minChi_list.append(minChi)

        return minChi_list

    # G-PCCA coarse-graining
    @d.dedent
    def optimize(
        self,
        m: Union[int, Tuple[int, int], List[int], Dict[str, int]],
    ) -> "GPCCA":
        r"""
        Full G-PCCA spectral clustering method with optimized memberships [Reuter18]_, [Reuter19]_.

        It also has the option to optimize the number of clusters (macrostates) `m` as well.

        If a single integer `m` is given, the method clusters the dominant `m` Schur vectors
        of the :attr:`transition_matrix`.
        The algorithm generates a fuzzy clustering such that the resulting membership functions `chi`
        are as crisp (characteristic) as possible, given `m`.

        Instead of a single number of clusters `m`, a :class:`tuple` or a :class:`dict`
        ``{'m_min': int, 'm_max': int}`` containing a minimum and a maximum number of clusters can be given.
        This results in repeated execution of the G-PCCA core algorithm for :math:`m \in [m_{min},m_{max}]`.
        Among the resulting clusterings, the sharpest/crispest one (with maximal `crispness`) will be selected.

        Parameters
        ----------
        %(m_optimize)s

        See :meth:`minChi` for selection of a good (potentially optimal) number of clusters.
        Returns
        -------
        Returns self and updates the following attributes:

            - :attr:`coarse_grained_input_distribution`
            - :attr:`coarse_grained_stationary_distribution`
            - :attr:`coarse_grained_transition_matrix`
            - :attr:`crispness_values`
            - :attr:`dominant_eigenvalues`
            - :attr:`input_distribution`
            - :attr:`macrostate_assignment`
            - :attr:`macrostate_sets`
            - :attr:`memberships`
            - :attr:`n_m`
            - :attr:`optimal_crispness`
            - :attr:`rotation_matrix`
            - :attr:`schur_matrix`
            - :attr:`schur_vectors`
            - :attr:`stationary_probability`
            - :attr:`top_eigenvalues`
            - :attr:`transition_matrix`
        """
        n = self._P.shape[0]

        # extract m_min, m_max, if given, else take single m
        if isinstance(m, (tuple, list)):
            if len(m) != 2:
                raise ValueError(f"Expected range to be of size 2, found `{len(m)}`.")
            m_list = m
            if m[0] >= m[1]:
                raise ValueError(f"m_min ({m[0]}) must be smaller than m_max ({m[1]}).")
        elif isinstance(m, dict):
            m_min = m["m_min"]
            m_max = m["m_max"]
            if m_min >= m_max:
                raise ValueError(f"m_min ({m_min}) must be smaller than m_max ({m_max}).")
            m_list = [m_min, m_max]
        elif isinstance(m, int):
            m_list = [m]
        else:
            raise TypeError(f"Invalid type `{type(m).__name__}`.")

        # validate input
        if max(m_list) > n:
            raise ValueError(
                f"Number of macrostates `({max(m_list)})` exceeds number "
                f"of states of the transition matrix `({n})`."
            )
        if min(m_list) in [0, 1]:
            raise ValueError(f"There is no point in clustering into `{min(m_list)}` clusters.")

        # The following code enclosed by >>>... ...<<< originates (with some adjustments) from MSMTools
        # Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER).
        # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # test connectivity
        components = connected_sets(self._P)
        n_components = len(components)
        # Store components as closed (with positive equilibrium distribution)
        # or as transition states (with vanishing equilibrium distribution).
        closed_components = []
        for i in range(n_components):
            component = components[i]
            rest = list(set(range(n)) - set(component))
            # is component closed?
            if np.sum(self._P[component, :][:, rest]) == 0:
                closed_components.append(component)
        n_closed_components = len(closed_components)
        # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

        # Calculate Schur matrix R and Schur vector matrix X, if not adequately given.
        self._do_schur_helper(max(m_list))
        if TYPE_CHECKING:
            assert isinstance(self._p_X, np.ndarray)
            assert isinstance(self._p_R, np.ndarray)
            assert isinstance(self._p_eigenvalues, np.ndarray)

        # Initialize lists to collect results.
        chi_list: List[ArrayLike] = []
        rot_matrix_list: List[ArrayLike] = []
        crispness_list: List[float] = []

        # Iterate over m
        for m in range(min(m_list), max(m_list) + 1):
            if len(self._p_eigenvalues) < m:
                raise ValueError(
                    f"Can't check complex conjugate block splitting for {m} clusters with only "
                    f"{len(self._p_eigenvalues)} eigenvalues."
                )
            if _check_conj_split(self._p_eigenvalues[:m]):
                warnings.warn(
                    f"Clustering into {m} clusters will split complex conjugate eigenvalues. "
                    f"Skipping clustering into {m} clusters.",
                    stacklevel=2,
                )
                crispness_list.append(0.0)
                chi_list.append(np.zeros((n, m)))
                rot_matrix_list.append(np.zeros((m, m)))
                continue

            # Reduce X according to m and make a work copy.
            # Xm = np.copy(X[:, :m])
            chi, rot_matrix, crispness = _gpcca_core(self._p_X[:, :m])
            # check if we have at least m dominant sets. If less than m, we warn.
            nmeta = np.count_nonzero(chi.sum(axis=0))
            if m > nmeta:
                crispness_list.append(-crispness)
                warnings.warn(
                    f"`{m}` macrostates requested, but the transition matrix only has "
                    f"`{nmeta}` macrostates. Request fewer macrostates.",
                    stacklevel=2,
                )
            # Check, if we have enough clusters to support the disconnected sets.
            elif m < n_closed_components:
                crispness_list.append(-crispness)
                warnings.warn(
                    f"Number of macrostates `({m})` is too small. "
                    f"Transition matrix has `{n_closed_components}` disconnected components.",
                    stacklevel=2,
                )
            else:
                crispness_list.append(crispness)
            chi_list.append(chi)
            rot_matrix_list.append(rot_matrix)

        if np.any(np.array(crispness_list) > 0.0):
            if len(m_list) > 1 and max(m_list) == n:
                warnings.warn(
                    f"Clustering {n} data points into {max(m_list)} clusters is always perfectly crisp. "
                    f"Thus m={max(m_list)} won't be included in the search for the optimal cluster number.",
                    stacklevel=2,
                )
                opt_idx = int(np.argmax(crispness_list[:-1]))
            else:
                opt_idx = int(np.argmax(crispness_list))
        else:
            raise ValueError("Clustering wasn't successful. Try different cluster numbers.")
        self._m_opt = min(m_list) + opt_idx
        self._chi = chi_list[opt_idx]
        self._rot_matrix = rot_matrix_list[opt_idx]
        self._crispness = np.array(crispness_list)
        self._crispness_opt = crispness_list[opt_idx]
        self._X = self._p_X[:, : self._m_opt]
        self._R = self._p_R[: self._m_opt, : self._m_opt]
        self._top_eigenvalues = self._p_eigenvalues[: self._m_opt]
        self._eigenvalues = self._p_eigenvalues[: max(m_list)]

        if TYPE_CHECKING:
            assert isinstance(self.memberships, np.ndarray)

        # coarse-grained stationary distribution
        self._pi_coarse = (
            None if self.stationary_probability is None else np.dot(self.memberships.T, self.stationary_probability)
        )
        # coarse-grained input (initial) distribution of states
        self._eta_coarse = np.dot(self.memberships.T, self.input_distribution)
        # coarse-grain transition matrix
        self._P_coarse = _coarsegrain(self.transition_matrix, eta=self.input_distribution, chi=self.memberships)

        return self

    @property
    def coarse_grained_input_distribution(self) -> OArray:
        r"""
        Coarse grained input distribution of shape `(n_m,)`.

        .. math:: \eta_c = \chi^T \eta
        """
        return self._eta_coarse

    @property
    def coarse_grained_stationary_probability(self) -> OArray:
        r"""
        Coarse grained stationary distribution of shape `(n_m,)`.

        .. math:: \pi_c = \chi^T \pi
        """
        return self._pi_coarse

    @property
    def coarse_grained_transition_matrix(self) -> OArray:
        r"""
        Coarse grained transition matrix of shape `(n_m, n_m)`.

        .. math:: P_c = (\chi^T D \chi)^{-1} (\chi^T D P \chi)

        with :math:`D` being a diagonal matrix with :math:`\eta` on its diagonal.
        """
        return self._P_coarse

    @property  # type: ignore[misc]
    @d.dedent
    def crispness_values(self) -> OArray:
        """
        Vector of crispness values for clustering into the requested cluster numbers.

        %(crispness_ret)s
        """
        return self._crispness

    @property  # type: ignore[misc]
    @d.dedent
    def dominant_eigenvalues(self) -> OArray:
        """
        Dominant :attr:`n_m` eigenvalues of `P`.

        Vector of shape `(n_m,)` containing the `n_m` dominant eigenvalues of `P`.
        """
        return self._top_eigenvalues

    @property
    def input_distribution(self) -> ArrayLike:
        r"""
        Input probability distribution of the (micro)states.

        In theory :math:`\eta` can be an arbitrary distribution as long as it is
        a valid probability distribution (i.e., sums up to 1).
        A neutral and valid choice would be the uniform distribution (default).

        In case of a reversible transition matrix, the stationary distribution
        :math:`\pi` can (but does not have to) be used here.
        In case of a non-reversible `P`, some initial or average distribution of
        the states might be chosen instead of the uniform distribution.

        Vector of shape `(n,)` which sums to 1.
""" return self._eta @property def macrostate_assignment(self) -> OArray: """ Crisp clustering using G-PCCA. This is recommended only for visualization purposes. You *cannot* compute any actual quantity of the coarse-grained kinetics without employing the fuzzy memberships! Returns ------- Integer vector of shape `(n,)` containing the macrostate each microstate is located in. Credits ------- The code and docstring of this property origins (with some adjustments) from MSMTools, Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER). """ return None if self.memberships is None else np.argmax(self.memberships, axis=1) # type: ignore[return-value] @property def macrostate_sets(self) -> Optional[List[ArrayLike]]: """ Crisp clustering using G-PCCA. This is recommended only for visualization purposes. You *cannot* compute any actual quantity of the coarse-grained kinetics without employing the fuzzy memberships! Returns ------- A list of length equal to :attr:`n_m`. Each element is an array with microstate indexes contained in it. Credits ------- The code and docstring of this property origins (with some adjustments) from MSMTools, Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER). """ return ( None if self.macrostate_assignment is None or self.n_m is None else [np.where(self.macrostate_assignment == i)[0] for i in range(self.n_m)] ) @property def memberships(self) -> OArray: r""" Array of shape `(n, n_m)` containing the membership :math:`\chi_{ij}` (or probability) of each microstate :math:`i` (to be assigned) to each macrostate or cluster :math:`j`. The rows sum to 1. """ # noqa: D205, D400 return self._chi @property def n_m(self) -> Optional[int]: """Optimal number of clusters or macrostates to group the `n` microstates into.""" return self._m_opt @property # type: ignore[misc] @d.dedent def optimal_crispness(self) -> Optional[float]: """ Crispness for clustering into :attr:`n_m` clusters. %(crispness_ret)s """ return self._crispness_opt @property def rotation_matrix(self) -> OArray: r""" Optimized rotation matrix :math:`A`. Array of shape `(n_m, n_m)` which rotates the dominant Schur vectors to yield the G-PCCA :attr:`memberships`, i.e. :math:`\chi = X A`. """ return self._rot_matrix @property def schur_matrix(self) -> OArray: r""" Ordered top left part of shape `(n_m, n_m)` of the real Schur matrix of :math:`P`. The ordered real partial Schur matrix :math:`R` of :math:`P` fulfills .. math:: P Q = Q R with the ordered matrix of dominant Schur vectors :math:`Q`. """ return self._R @property def schur_vectors(self) -> OArray: r""" Array :math:`Q` of shape `(n, n_m)` with `n_m` sorted Schur vectors in the columns. The constant Schur vector is in the first column. """ return self._X @cached_property def stationary_probability(self) -> OArray: r""" Stationary probability distribution :math:`\pi` of the microstates. Vector of shape `(n,)` which sums to 1. """ try: return stationary_distribution(self._P) except Exception as e: # noqa: B902 warnings.warn(f"Stationary distribution couldn't be calculated. Reason: {e}.", stacklevel=2) return None @property def top_eigenvalues(self) -> OArray: """ Top `m` respective `m_max` eigenvalues of `P`. If a single integer `m` was given, the upper `m` eigenvalues are returned. If a :class:`tuple` or :class:`dict` containing a minimum `m_min` and maximum number `m_max` of clusters was given, the upper `m_max` eigenvalues are returned. 
""" return self._eigenvalues @property def transition_matrix(self) -> Union[ArrayLike, spmatrix]: """Row-stochastic transition matrix `P`.""" return self._P def __repr__(self) -> str: return f"{self.__class__.__name__}[n={self.transition_matrix.shape[0]}, n_macrostates={self.n_m}]" def __str__(self) -> str: return repr(self)
47,636
35.503448
120
py
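A minimal usage sketch of the optimization workflow above, run on a toy matrix with two weakly coupled blocks. It assumes the surrounding class is exposed as `pygpcca.GPCCA` and that its constructor accepts a row-stochastic transition matrix; neither the class name nor the constructor signature appears in this excerpt, so treat both as assumptions.

import numpy as np
import pygpcca as gp

# a small row-stochastic toy matrix with two metastable sets
P = np.array(
    [
        [0.90, 0.10, 0.00, 0.00],
        [0.10, 0.89, 0.01, 0.00],
        [0.00, 0.01, 0.89, 0.10],
        [0.00, 0.00, 0.10, 0.90],
    ]
)

gpcca = gp.GPCCA(P)  # assumed constructor, not shown in this excerpt
gpcca.optimize({"m_min": 2, "m_max": 3})  # search the range [2, 3] for the crispest clustering

print(gpcca.n_m)  # optimal number of macrostates found in the range
print(gpcca.memberships.sum(axis=1))  # rows of chi sum to 1
print(gpcca.coarse_grained_transition_matrix)  # (n_m, n_m) coarse-grained P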
pyGPCCA
pyGPCCA-main/pygpcca/_sorted_schur.py
# This file is part of pyGPCCA.
#
# Copyright (c) 2020 Bernhard Reuter.
# With contributions of Marius Lange, Michal Klein and Alexander Sikorski.
# Based on the original MATLAB GPCCA code authored by Bernhard Reuter, Susanna Roeblitz and Marcus Weber,
# Zuse Institute Berlin, Takustrasse 7, 14195 Berlin
# We like to thank A. Sikorski and M. Weber for pointing us to SLEPc for partial Schur decompositions of
# sparse matrices.
# Further parts of sorted_krylov_schur were developed based on the function krylov_schur
# https://github.com/zib-cmd/cmdtools/blob/1c6b6d8e1c35bb487fcf247c5c1c622b4b665b0a/src/cmdtools/analysis/pcca.py#L64,
# written by Alexander Sikorski.
# --------------------------------------------------------------------------------------------------------------------
# If you use this code or parts of it, cite the following reference:
# --------------------------------------------------------------------------------------------------------------------
# Bernhard Reuter, Konstantin Fackeldey, and Marcus Weber,
# Generalized Markov modeling of nonreversible molecular kinetics,
# The Journal of Chemical Physics, 150(17):174103, 2019.
# https://doi.org/10.1063/1.5064530
# --------------------------------------------------------------------------------------------------------------------
# pyGPCCA is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with this program.
# If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------------------------------------------

from typing import Tuple, Union, Literal

import sys
import warnings

if not sys.warnoptions:
    import os

    warnings.simplefilter("always", category=UserWarning)  # Change the filter in this process
    os.environ["PYTHONWARNINGS"] = "always::UserWarning"  # Also affect subprocesses

from scipy.linalg import schur, rsf2csf, subspace_angles
from scipy.sparse import issparse, spmatrix, csr_matrix, isspmatrix_csr

import numpy as np

from pygpcca._types import ArrayLike
from pygpcca.utils._docs import d
from pygpcca.utils._checks import assert_petsc_real_scalar_type
from pygpcca._sort_real_schur import sort_real_schur
from pygpcca.utils._constants import EPS, DEFAULT_SCHUR_METHOD, NO_PETSC_SLEPC_FOUND_MSG

try:
    import petsc4py
    import slepc4py

    assert_petsc_real_scalar_type()
except (ImportError, TypeError):
    petsc4py = slepc4py = None

__all__ = ["sorted_schur"]


def _initialize_matrix(M: "petsc4py.PETSc.Mat", P: Union[ArrayLike, spmatrix]) -> None:
    """
    Initialize PETSc matrix.

    Parameters
    ----------
    M
        :mod:`petsc4py` matrix to initialize.
    P
        :mod:`numpy` array or :mod:`scipy` sparse matrix from which we take the data.

    Returns
    -------
    Nothing, just initializes `M`. If `P` is an :class:`numpy.ndarray`, `M` will also be dense.
    If `P` is a :class:`scipy.sparse.spmatrix`, `M` will become a CSR matrix regardless of `P`'s sparse format.
    """
    if issparse(P):
        if not isspmatrix_csr(P):
            warnings.warn("Only CSR sparse matrices are supported, converting.", stacklevel=2)
            P = csr_matrix(P)
        M.createAIJ(size=P.shape, csr=(P.indptr, P.indices, P.data))  # type: ignore[union-attr]
    else:
        M.createDense(list(np.shape(P)), array=P)


@d.dedent
def _check_conj_split(eigenvalues: ArrayLike) -> bool:
    """
    Check whether using m eigenvalues cuts through a block of complex conjugates.

    If the last (`m`th) eigenvalue is not real, check whether it forms a complex conjugate
    pair with the second-last eigenvalue. If that is not the case, then choosing `m` clusters
    would cut through a block of complex conjugates.

    Parameters
    ----------
    eigenvalues
        %(eigenvalues_m)s

    Returns
    -------
    ``True`` if a block of complex conjugate eigenvalues is split, ``False`` otherwise.
    """
    last_eigenvalue, second_last_eigenvalue = eigenvalues[-1], eigenvalues[-2]
    splits_block = False
    if last_eigenvalue.imag > EPS:
        splits_block = not np.isclose(last_eigenvalue, np.conj(second_last_eigenvalue))

    return splits_block


@d.dedent
def _check_schur(P: ArrayLike, Q: ArrayLike, R: ArrayLike, eigenvalues: ArrayLike, method: str) -> None:
    """
    Run a number of checks on the sorted Schur decomposition.

    Parameters
    ----------
    %(P)s
    Q
        %(Q_sort)s
    R
        %(R_sort)s
    eigenvalues
        %(eigenvalues_m)s
    %(method)s

    Returns
    -------
    Nothing.
    """
    m = len(eigenvalues)

    # check the dimensions
    if Q.shape[1] != len(eigenvalues):
        raise ValueError(f"Number of Schur vectors does not match number of eigenvalues for `method={method!r}`.")
    if R.shape[0] != R.shape[1]:
        raise ValueError(f"R is not square for `method={method!r}`.")
    if P.shape[0] != Q.shape[0]:
        raise ValueError(f"First dimension in P does not match first dimension in Q for `method={method!r}`.")
    if R.shape[0] != Q.shape[1]:
        raise ValueError(f"First dimension in R does not match second dimension in Q for `method={method!r}`.")

    # check whether things are real
    if not np.all(np.isreal(Q)):
        raise TypeError(
            f"The orthonormal basis of the subspace returned by `method={method!r}` is not real. "
            "G-PCCA needs real basis vectors to work."
        )

    dummy = np.dot(P, csr_matrix(Q) if issparse(P) else Q)
    if issparse(dummy):
        dummy = dummy.toarray()

    dummy1 = np.dot(Q, np.diag(eigenvalues))
    # dummy2 = np.concatenate((dummy, dummy1), axis=1)
    dummy3 = subspace_angles(dummy, dummy1)
    # test1 = ( ( matrix_rank(dummy2) - matrix_rank(dummy) ) == 0 )
    test2 = np.allclose(dummy3, 0.0, atol=1e-8, rtol=1e-5)
    test3 = dummy3.shape[0] == m
    dummy4 = subspace_angles(dummy, Q)
    test4 = np.allclose(dummy4, 0.0, atol=1e-6, rtol=1e-5)

    if not test4:
        raise ValueError(
            f"According to `scipy.linalg.subspace_angles()`, `{method}` didn't "
            f"return an invariant subspace of P. The subspace angles are: `{dummy4}`."
        )
    if not test2:
        warnings.warn(
            f"According to `scipy.linalg.subspace_angles()`, `{method}` didn't "
            f"return the invariant subspace associated with the top k eigenvalues, "
            f"since the subspace angles between the column spaces of P*Q and Q*L "
            f"aren't near zero (L is a diagonal matrix with the "
            f"sorted top eigenvalues on the diagonal). The subspace angles are: `{dummy3}`.",
            stacklevel=2,
        )
    if not test3:
        warnings.warn(
            f"According to `scipy.linalg.subspace_angles()`, the dimension of the "
            f"column space of P*Q and/or Q*L is not equal to m (L is a diagonal "
            f"matrix with the sorted top eigenvalues on the diagonal), method=`{method}`.",
            stacklevel=2,
        )


@d.dedent
def sorted_krylov_schur(
    P: Union[spmatrix, ArrayLike], k: int, z: Literal["LM", "LR"] = "LM", tol: float = 1e-16
) -> Tuple[ArrayLike, ArrayLike, ArrayLike, ArrayLike]:
    r"""
    Calculate an orthonormal basis of the subspace associated with the `k` dominant
    eigenvalues of `P` using the Krylov-Schur method as implemented in SLEPc.

    This function requires :mod:`petsc4py` and :mod:`slepc4py`.

    Parameters
    ----------
    %(P)s
    %(k)s
    %(z)s
    tol
        Convergence criterion used by SLEPc internally. If you are dealing with ill-conditioned matrices,
        consider decreasing this value to get accurate results.

    Returns
    -------
    Tuple of the following:

    R
        %(R_sort)s
    Q
        %(Q_sort)s
    eigenvalues
        %(eigenvalues_k)s
    eigenvalues_error
        Array of shape `(k,)` containing the error, based on the residual norm,
        of the `i`th eigenpair at index `i`.
    """  # noqa: D205, D400
    # We like to thank A. Sikorski and M. Weber for pointing us to SLEPc for partial Schur decompositions of
    # sparse matrices.
    # Further parts of sorted_krylov_schur were developed based on the function krylov_schur
    # https://github.com/zib-cmd/cmdtools/blob/1c6b6d8e1c35bb487fcf247c5c1c622b4b665b0a/src/cmdtools/analysis/pcca.py#L64,
    # written by Alexander Sikorski.
    from petsc4py import PETSc
    from slepc4py import SLEPc

    M = PETSc.Mat().create()
    _initialize_matrix(M, P)
    # Creates EPS object.
    E = SLEPc.EPS()
    E.create()
    # Set the matrix associated with the eigenvalue problem.
    E.setOperators(M)
    # Select the particular solver to be used in the EPS object: Krylov-Schur
    E.setType(SLEPc.EPS.Type.KRYLOVSCHUR)
    # Set the number of eigenvalues to compute and the dimension of the subspace.
    E.setDimensions(nev=k)
    # set the tolerance used in the convergence criterion
    E.setTolerances(tol=tol)
    # Specify which portion of the spectrum is to be sought.
    # LARGEST_MAGNITUDE: Largest magnitude (default).
    # LARGEST_REAL: Largest real parts.
    # All possible options can be found here:
    # (see: https://slepc.upv.es/slepc4py-current/docs/apiref/slepc4py.SLEPc.EPS.Which-class.html)
    if z == "LM":
        E.setWhichEigenpairs(E.Which.LARGEST_MAGNITUDE)
    elif z == "LR":
        E.setWhichEigenpairs(E.Which.LARGEST_REAL)
    else:
        raise ValueError(f"Invalid spectrum sorting option `{z}`. Valid options are: 'LM', 'LR'.")
    # Solve the eigensystem.
    E.solve()

    # getInvariantSubspace() gets an orthonormal basis of the computed invariant subspace.
    # It returns a list of vectors.
    # The returned real vectors span an invariant subspace associated with the computed eigenvalues.
    # We take the sequence of 1-D arrays and stack them as columns to make a single 2-D array.
    Q = np.column_stack([x.array for x in E.getInvariantSubspace()])

    try:
        # otherwise, R would be of shape `(k + 1, k)`
        E.getDS().setExtraRow(False)
    except AttributeError:
        pass

    # Get the Schur form
    ds = E.getDS()
    A = ds.getMat(SLEPc.DS.MatType.A)
    R = A.getDenseArray().astype(np.float64)
    ds.restoreMat(SLEPc.DS.MatType.A, A)

    # Gets the number of converged eigenpairs.
    nconv = E.getConverged()
    # Warn, if nconv smaller than k.
    if nconv < k:
        warnings.warn(f"The number of converged eigenpairs is `{nconv}`, but `{k}` were requested.", stacklevel=2)

    # Collect the k dominant eigenvalues.
    # If more eigenpairs than requested converged, all of them are collected here;
    # callers truncate to the leading values afterwards (see `sorted_schur`).
    eigenvalues = []
    eigenvalues_error = []
    for i in range(nconv):
        # Get the i-th eigenvalue as computed by solve().
        eigenval = E.getEigenvalue(i)
        eigenvalues.append(eigenval)
        # Computes the error (based on the residual norm) associated with the i-th computed eigenpair.
        eigenval_error = E.computeError(i)
        eigenvalues_error.append(eigenval_error)

    # convert lists with eigenvalues and errors to arrays (while keeping excess eigenvalues and errors)
    eigenvalues = np.asarray(eigenvalues)  # type: ignore[assignment]
    eigenvalues_error = np.asarray(eigenvalues_error)  # type: ignore[assignment]

    return R, Q, eigenvalues, eigenvalues_error  # type: ignore[return-value]


@d.dedent
def sorted_brandts_schur(P: ArrayLike, k: int, z: Literal["LM", "LR"] = "LM") -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
    """
    Compute a sorted Schur decomposition.

    This function uses :mod:`scipy` for the decomposition and Brandts' method
    (see [Brandts02]_) for the sorting.

    Parameters
    ----------
    %(P)s
    %(k)s
    %(z)s

    Returns
    -------
    Tuple of the following:

    R
        %(R_sort)s
    Q
        %(Q_sort)s
    eigenvalues
        %(eigenvalues_k)s
    """
    # Make a Schur decomposition of P.
    R, Q = schur(P, output="real")

    # Sort the Schur matrix and vectors.
    Q, R, ap = sort_real_schur(Q, R, z=z, b=k)

    # Warnings
    if np.any(np.array(ap) > 1.0):
        warnings.warn("Reordering of Schur matrix was inaccurate.", stacklevel=2)

    # compute eigenvalues
    T, _ = rsf2csf(R, Q)
    eigenvalues = np.diag(T)[:k]

    return R, Q, eigenvalues


@d.dedent
def sorted_schur(
    P: Union[ArrayLike, spmatrix],
    m: int,
    z: Literal["LM", "LR"] = "LM",
    method: str = DEFAULT_SCHUR_METHOD,
    tol_krylov: float = 1e-16,
) -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
    """
    Return ``m`` dominant real Schur vectors or an orthonormal basis spanning the same invariant subspace.

    Parameters
    ----------
    %(P)s
    %(m)s
    %(z)s
    %(method)s
    %(tol_krylov)s

    Returns
    -------
    Tuple of the following:

    R
        %(R_sort)s
    Q
        %(Q_sort)s
    eigenvalues
        %(eigenvalues_m)s
    """
    if method == "krylov":
        if petsc4py is None or slepc4py is None:
            method = DEFAULT_SCHUR_METHOD
            warnings.warn(NO_PETSC_SLEPC_FOUND_MSG, stacklevel=2)

    if method != "krylov" and issparse(P):
        raise ValueError("Sparse implementation is only available for `method='krylov'`.")

    # make sure we have enough eigenvalues to check for block splitting
    n = P.shape[0]
    if m > n:
        raise ValueError(f"Requested more groups than states: {m} > {n}.")

    # compute the sorted Schur decomposition
    if method == "brandts":
        R, Q, eigenvalues = sorted_brandts_schur(P=P, k=m, z=z)
    elif method == "krylov":
        R, Q, eigenvalues, _ = sorted_krylov_schur(P=P, k=m, z=z, tol=tol_krylov)
    else:
        raise ValueError(f"Unknown method `{method!r}`.")

    # check for splitting pairs of complex conjugates
    if m < n:
        if _check_conj_split(eigenvalues[:m]):
            raise ValueError(
                f"Clustering into {m} clusters will split complex conjugate eigenvalues. "
                "Request one cluster more or one cluster fewer."
            )
        Q, R, eigenvalues = Q[:, :m], R[:m, :m], eigenvalues[:m]

    # check the returned Schur decomposition
    _check_schur(P=P, Q=Q, R=R, eigenvalues=eigenvalues, method=method)

    return R, Q, eigenvalues
14,645
34.721951
122
py
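A short sketch exercising `sorted_schur` from the module above on a dense toy matrix, using the well-tested 'brandts' backend. The final check mirrors the invariance property that `_check_schur` validates internally: the returned Schur vectors `Q` span an invariant subspace of `P` with `P Q = Q R`.

import numpy as np
from pygpcca._sorted_schur import sorted_schur

# a symmetric (hence reversible) row-stochastic toy matrix
P = np.array(
    [
        [0.85, 0.10, 0.05],
        [0.10, 0.80, 0.10],
        [0.05, 0.10, 0.85],
    ]
)

R, Q, eigenvalues = sorted_schur(P, m=2, z="LM", method="brandts")

print(Q.shape, R.shape)  # (3, 2) and (2, 2)
print(np.allclose(P @ Q, Q @ R))  # True: Q spans an invariant subspace of P
print(eigenvalues)  # the 2 dominant eigenvalues, with the Perron root 1 first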
pyGPCCA
pyGPCCA-main/pygpcca/utils/_utils.py
# This file is part of pyGPCCA.
#
# The code and documentation of the functions below originate (with some adjustments) from MSMTools.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# MSMTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from typing import List, Tuple, Union
from functools import singledispatch

from scipy.linalg import eig, lu_solve, lu_factor
from scipy.sparse import csgraph, spmatrix, csr_matrix, isspmatrix_csr
from scipy.sparse.linalg import eigs

import numpy as np

from pygpcca._types import ArrayLike
from pygpcca.utils._docs import d
from pygpcca.utils._checks import ensure_ndarray_or_sparse
from pygpcca.utils._constants import EPS

__all__ = [
    "connected_sets",
    "is_transition_matrix",
    "stationary_distribution",
]


@singledispatch
def connected_sets(C: Union[ArrayLike, spmatrix], directed: bool = True) -> List[ArrayLike]:
    """
    Compute connected sets of microstates.

    Connected components for a directed graph with edge-weights given by the count matrix.

    Parameters
    ----------
    C
        Count matrix specifying edge weights.
    directed
        Whether to compute connected components for a directed or undirected graph.

    Returns
    -------
    Each entry is an array containing all vertices (states) in the corresponding connected component.
    The list is sorted according to the size of the individual components.
    The largest connected set is the first entry in the list.

    Notes
    -----
    Viewing the count matrix as the adjacency matrix of a (directed) graph, the connected
    components are given by the connected components of that graph. Connected components
    of a graph can be efficiently computed using Tarjan's algorithm [1]_.

    References
    ----------
    .. [1] Tarjan, R E. 1972. Depth-first search and linear graph algorithms.
           SIAM Journal on Computing 1 (2): 146-160.

    Credits
    -------
    The code and docstring of this function originate (with some adjustments) from MSMTools,
    Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER).
    """
    raise NotImplementedError(type(C))


@connected_sets.register(np.ndarray)
def _csd(C: ArrayLike, directed: bool = True) -> List[ArrayLike]:
    return connected_sets(csr_matrix(C), directed=directed)


@connected_sets.register(spmatrix)
def _css(C: spmatrix, directed: bool = True) -> List[ArrayLike]:
    if not isspmatrix_csr(C):
        C = csr_matrix(C)

    M = C.shape[0]
    # compute connected components of C. nc is the number of components,
    # indices contain the component labels of the states
    nc, indices = csgraph.connected_components(C, directed=directed, connection="strong")

    # discrete states
    states = np.arange(M)

    # order indices
    ind = np.argsort(indices)
    indices = indices[ind]

    # order states
    states = states[ind]

    # the state index tuple is now of the following form (states, indices)=
    # ([s_23, s_17,...,s_3, s_2, ...], [0, 0, ..., 1, 1, ...])

    # find number of states per component
    count = np.bincount(indices)

    # cumulative sum of count gives start and end indices of components
    csum = np.zeros(len(count) + 1, dtype=np.uint32)
    csum[1:] = np.cumsum(count)

    # generate list containing components, sort each component by increasing state label
    cc = [np.sort(states[csum[i] : csum[i + 1]]) for i in range(nc)]

    # sort by size of component - largest component first
    return sorted(cc, key=lambda x: -len(x))


@singledispatch
def is_transition_matrix(T: Union[ArrayLike, spmatrix], tol: float = 1e-12) -> bool:
    r"""
    Check if the given matrix is a transition matrix.

    Parameters
    ----------
    T
        Matrix to check.
    tol
        Floating point tolerance to check with.

    Returns
    -------
    True, if ``T`` is a valid transition matrix, false otherwise.

    Notes
    -----
    A valid transition matrix :math:`P=(p_{ij})` has non-negative elements,
    :math:`p_{ij} \geq 0`, and elements of each row sum up to one,
    :math:`\sum_j p_{ij} = 1`. Matrices with this property are also called
    stochastic matrices.

    Credits
    -------
    The code and docstring of this function originate (with some adjustments) from MSMTools,
    Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER).
    """
    raise NotImplementedError(type(T))


@is_transition_matrix.register(spmatrix)
def _itmd(T: spmatrix, tol: float = 1e-12) -> bool:
    T = ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind="numeric")
    if not isspmatrix_csr(T):
        T = csr_matrix(T)  # compressed sparse row for fast row slicing
    values = T.data  # non-zero entries of T

    # check entry-wise positivity
    is_positive: bool = np.allclose(values, np.abs(values), rtol=tol)

    # check row normalization
    is_normed: bool = np.allclose(T.sum(axis=1), 1.0, rtol=tol)

    return is_positive and is_normed


@is_transition_matrix.register(np.ndarray)
def _itms(T: ArrayLike, tol: float = 1e-12) -> bool:
    T = ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind="numeric")
    dim = T.shape[0]
    X = np.abs(T) - T
    x = np.sum(T, axis=1)

    return X.max() < 2.0 * tol and np.abs(x - np.ones(dim)).max() < dim * tol  # type: ignore[no-any-return]


@singledispatch
@d.dedent
def stationary_distribution(P: Union[ArrayLike, spmatrix]) -> ArrayLike:
    r"""
    Compute stationary distribution of stochastic matrix `P`.

    Parameters
    ----------
    %(P)s

    Returns
    -------
    Vector of stationary probabilities.

    Notes
    -----
    The stationary distribution :math:`\pi` is the left eigenvector corresponding
    to the non-degenerate eigenvalue :math:`\lambda=1` of a reversible transition
    matrix :math:`P`,

    .. math:: \pi^T P = \pi^T.

    Credits
    -------
    The code and docstring of this function originate (with some adjustments) from MSMTools,
    Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER).
    """
    raise NotImplementedError(type(P))


@stationary_distribution.register(np.ndarray)
def _sdd(P: ArrayLike) -> ArrayLike:
    try:
        mu = stationary_distribution_from_backward_iteration(P)
        if np.any(mu < 0):
            # numerical problem, fall back to more robust algorithm.
            raise RuntimeError("Encountered negative value.")
    except RuntimeError:
        mu = stationary_distribution_from_eigenvector(P)
        if np.any(mu < 0):
            # still? Then set to 0 and renormalize
            mu = np.maximum(mu, 0.0)
            mu /= mu.sum()

    # check whether this really is a stationary distribution
    _is_stationary_distribution(P, mu)

    return mu


def _eigs_slepc(P: spmatrix, k: int, which: str = "LR", tol: float = EPS) -> Tuple[ArrayLike, ArrayLike]:
    from petsc4py import PETSc
    from slepc4py import SLEPc

    M = PETSc.Mat().create()
    if not isspmatrix_csr(P):
        P = csr_matrix(P)
    M.createAIJ(size=P.shape, csr=(P.indptr, P.indices, P.data))

    E = SLEPc.EPS()
    E.create()
    E.setOperators(M)
    E.setDimensions(k)
    E.setTolerances(tol=tol)
    if which == "LR":
        E.setWhichEigenpairs(E.Which.LARGEST_REAL)
    elif which == "LM":
        E.setWhichEigenpairs(E.Which.LARGEST_MAGNITUDE)
    else:
        raise NotImplementedError(f"`which={which}` is not implemented.")
    E.solve()

    nconv = E.getConverged()
    if nconv < k:
        raise ValueError(f"Requested `{k}` eigenvalues/vectors, but only `{nconv}` converged.")

    xr, _ = M.getVecs()
    xi, _ = M.getVecs()

    eigenvalues, eigenvectors = [], []
    for i in range(k):
        # Get the i-th eigenvalue as computed by solve().
        eigenvalues.append(E.getEigenpair(i, xr, xi))
        if eigenvalues[-1].imag != 0.0:
            eigenvectors.append([complex(r, i) for r, i in zip(xr.getArray(), xi.getArray())])
        else:
            eigenvectors.append(list(xr.getArray()))

    return np.asarray(eigenvalues), np.asarray(eigenvectors).T


@stationary_distribution.register(spmatrix)
def _sds(P: spmatrix) -> ArrayLike:
    # get the top two eigenvalues and vecs so that we can check for irreducibility
    try:
        vals, vecs = _eigs_slepc(P.T, k=2, which="LR")
    except ImportError:
        vals, vecs = eigs(P.T, k=2, which="LR", ncv=None)

    # check for irreducibility
    if np.allclose(vals, 1, rtol=1e2 * EPS, atol=1e2 * EPS):
        second_largest = np.min(vals)
        raise ValueError(f"This matrix is reducible. The second largest eigenvalue is {second_largest}.")

    # sort by real part and take the top one
    p = np.argsort(vals.real)[::-1]
    vecs = vecs[:, p]
    top_vec = vecs[:, 0]

    # check for imaginary component
    imaginary_component = top_vec.imag
    if not np.allclose(imaginary_component, 0, rtol=EPS, atol=EPS):
        raise ValueError("Top eigenvector has imaginary component.")
    top_vec = top_vec.real

    # check the sign structure
    if not (top_vec > -1e4 * EPS).all() and not (top_vec < 1e4 * EPS).all():
        el_min, el_max = np.min(top_vec), np.max(top_vec)
        raise ValueError(
            f"Top eigenvector has both positive and negative entries. It has range = [{el_min}, {el_max}]."
        )
    top_vec = np.abs(top_vec)

    pi = top_vec / np.sum(top_vec)

    # check whether this really is a stationary distribution
    _is_stationary_distribution(P, pi)

    # normalize to 1 and return
    return pi


def backward_iteration(A: ArrayLike, mu: float, x0: ArrayLike, tol: float = 1e-14, maxiter: int = 100) -> ArrayLike:
    """
    Find eigenvector to approximate eigenvalue via backward iteration.

    Parameters
    ----------
    A
        Matrix for which eigenvector is desired.
    mu
        Approximate eigenvalue for desired eigenvector.
    x0
        Initial guess for eigenvector.
    tol
        Tolerance parameter for termination of iteration.
    maxiter
        Maximum number of iterations to perform before giving up.

    Returns
    -------
    Eigenvector to approximate eigenvalue ``mu``.

    Credits
    -------
    The code and docstring of this function originate (with some adjustments) from MSMTools,
    Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER).
    """
    T = A - mu * np.eye(A.shape[0])
    # LU-factor of T
    lupiv = lu_factor(T)
    # starting iterate with ||y_0||=1
    r0 = 1.0 / np.linalg.norm(x0)
    y0 = x0 * r0
    # local variables for inverse iteration
    y = 1.0 * y0
    r = 1.0 * r0
    for _ in range(maxiter):
        x = lu_solve(lupiv, y)
        r = 1.0 / np.linalg.norm(x)
        y = x * r
        if r <= tol:
            return y

    raise RuntimeError(f"Failed to converge after `{maxiter}` iterations, residuum is `{r}`.")


@d.dedent
def stationary_distribution_from_backward_iteration(P: ArrayLike, eps: float = 1e-15) -> ArrayLike:
    """
    Fast computation of the stationary vector using backward iteration.

    Parameters
    ----------
    %(P)s
    eps
        Perturbation parameter for the true eigenvalue.

    Returns
    -------
    Stationary vector.

    Credits
    -------
    The code and docstring of this function originate (with some adjustments) from MSMTools,
    Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER).
    """
    A = np.transpose(P)
    mu = 1.0 - eps
    x0 = np.ones(P.shape[0])
    y = backward_iteration(A, mu, x0)

    return y / np.sum(y)


@d.dedent
def stationary_distribution_from_eigenvector(P: ArrayLike) -> ArrayLike:
    r"""
    Compute stationary distribution of stochastic matrix `P`.

    The stationary distribution is the left eigenvector corresponding to the
    non-degenerate eigenvalue :math:`\lambda=1`.

    Parameters
    ----------
    %(P)s

    Returns
    -------
    Vector of stationary probabilities.

    Credits
    -------
    The code and docstring of this function originate (with some adjustments) from MSMTools,
    Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER).
    """
    val, L = eig(P, left=True, right=False)

    # sort eigenvalues in decreasing order; only the left eigenvectors were computed
    perm = np.argsort(val)[::-1]

    L = L[:, perm]
    # make sure that stationary distribution is non-negative and l1-normalized
    nu = np.abs(L[:, 0])

    return nu / np.sum(nu)


def _is_stationary_distribution(T: Union[ArrayLike, spmatrix], pi: ArrayLike) -> bool:
    # check the shapes
    if not T.shape[0] == T.shape[1] or not T.shape[0] == pi.shape[0]:
        raise ValueError("Shape mismatch.")

    # check for invariance
    if not np.allclose(T.T.dot(pi), pi, rtol=1e6 * EPS, atol=1e6 * EPS):
        dev = np.max(np.abs(T.T.dot(pi) - pi))
        raise ValueError(
            f"Stationary distribution is not invariant under the transition matrix. Maximal deviation = {dev}."
        )

    # check for positivity
    if not (pi > -1e4 * EPS).all():
        dev = np.min(pi)
        raise ValueError(f"Stationary distribution has negative elements. Minimal element = {dev}.")

    # check whether it sums to one
    if not np.allclose(pi.sum(), 1, rtol=1e4 * EPS, atol=1e4 * EPS):
        dev = np.abs(pi.sum() - 1)
        raise ValueError(f"Stationary distribution does not sum to one. Deviation = {dev}.")

    return True
13,967
31.036697
120
py
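A brief sketch of the three public helpers (`__all__`) of the module above, using the import path from this file's header. For a connected, reversible toy chain, the stationary vector is the invariant left eigenvector of `P`.

import numpy as np
from pygpcca.utils._utils import connected_sets, is_transition_matrix, stationary_distribution

P = np.array(
    [
        [0.9, 0.1, 0.0],
        [0.1, 0.8, 0.1],
        [0.0, 0.1, 0.9],
    ]
)

print(is_transition_matrix(P))  # True: non-negative entries, rows sum to 1
print(connected_sets(P))  # a single strongly connected component: [array([0, 1, 2])]

pi = stationary_distribution(P)
print(np.allclose(pi @ P, pi))  # True: pi is invariant under P
print(np.isclose(pi.sum(), 1.0))  # True: pi is a probability distribution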
pyGPCCA
pyGPCCA-main/pygpcca/utils/_checks.py
from typing import Tuple, Union, Optional
import logging

from scipy.sparse import issparse, spmatrix

import numpy as np

from pygpcca._types import ArrayLike
from pygpcca.utils._docs import d

__all__ = ["ensure_ndarray_or_sparse", "assert_petsc_real_scalar_type"]


@d.get_sections(base="assert_array", sections=["Parameters"])
def assert_array(
    A: Union[ArrayLike, spmatrix],
    shape: Optional[Tuple[int, ...]] = None,
    uniform: Optional[bool] = None,
    ndim: Optional[int] = None,
    size: Optional[int] = None,
    dtype: Optional[Union[type, np.dtype]] = None,  # type: ignore[type-arg]
    kind: Optional[str] = None,
) -> None:
    """
    Assert whether the given array or sparse matrix has the given properties.

    Parameters
    ----------
    A
        The array-like object under investigation.
    shape
        Assert if the array has the requested shape. Be careful with vectors, because this will
        distinguish between row vectors `(1, n)`, column vectors `(n, 1)` and arrays `(n,)`.
        If you want to be less specific, consider using the ``size`` option.
    uniform
        If not `None`, asserts whether the array dimensions are uniform
        (e.g. square for a ``ndim=2`` array) or not.
    ndim
        Assert if the array has the requested dimension.
    size
        Assert if the array has the requested number of elements.
    dtype
        Assert if the array data has the requested data type. This check is strong, e.g. int and
        int64 are not equal. If you want a weaker check, consider the ``kind`` option.
    kind
        Check if the array is of the specified kind. Options include 'i' for integer types,
        'f' for float types. Check :attr:`numpy.dtype.kind` for possible options.
        An additional option is 'numeric' for either :class:`integer` or :class:`float`.

    Returns
    -------
    Nothing, just performs the aforementioned checks.

    Raises
    ------
    AssertionError
        If assertions have failed.
    """
    try:
        if shape is not None:
            if not np.array_equal(np.shape(A), shape):
                raise AssertionError(f"Expected shape {shape}, but given array has shape {np.shape(A)}.")
        if uniform is not None:
            shapearr = np.array(np.shape(A))
            is_uniform = np.count_nonzero(shapearr - shapearr[0]) == 0
            if uniform and not is_uniform:
                raise AssertionError(f"Given array is not uniform: {shapearr}.")
            elif not uniform and is_uniform:
                raise AssertionError(f"Given array is uniform, but was expected to be nonuniform: {shapearr}.")
        if size is not None:
            if not np.size(A) == size:
                raise AssertionError(f"Expected size {size}, but given array has size {np.size(A)}.")
        if ndim is not None:
            if not ndim == np.ndim(A):
                raise AssertionError(f"Expected ndim {ndim}, but given array has ndim {np.ndim(A)}.")
        if dtype is not None:
            # now we must create an array if we don't have one yet
            if not isinstance(A, np.ndarray) and not issparse(A):
                A = np.array(A)
            if not np.dtype(dtype) == A.dtype:
                raise AssertionError(f"Expected data type {dtype}, but given array has data type {A.dtype}.")
        if kind is not None:
            # now we must create an array if we don't have one yet
            if not isinstance(A, np.ndarray) and not issparse(A):
                A = np.array(A)
            if kind == "numeric":
                if not (A.dtype.kind == "i" or A.dtype.kind == "f"):
                    raise AssertionError(f"Expected numerical data, but given array has data kind {A.dtype.kind}.")
            elif not A.dtype.kind == kind:
                raise AssertionError(f"Expected data kind {kind}, but given array has data kind {A.dtype.kind}.")
    except AssertionError:
        raise
    except Exception as e:
        raise AssertionError(
            f"Given argument is not an array of the expected shape or type: {A}, type={type(A).__name__}."
        ) from e


@d.dedent
def ensure_ndarray_or_sparse(
    A: Union[ArrayLike, spmatrix],
    shape: Optional[Tuple[int, ...]] = None,
    uniform: Optional[bool] = None,
    ndim: Optional[int] = None,
    size: Optional[int] = None,
    dtype: Optional[Union[type, np.dtype]] = None,  # type: ignore[type-arg]
    kind: Optional[str] = None,
) -> Union[ArrayLike, spmatrix]:
    """
    Ensure ``A`` is an array or a sparse matrix and assert that the given parameters match.

    Parameters
    ----------
    %(assert_array.parameters)s

    Returns
    -------
    If ``A`` is an already valid :class:`numpy.ndarray` or :class:`scipy.sparse.spmatrix`,
    then it is simply returned. Otherwise, returns a :class:`numpy.ndarray` copy of the
    array-like object ``A``.
    """
    if not isinstance(A, np.ndarray) and not issparse(A):
        try:
            A = np.array(A)
        except Exception as e:
            raise AssertionError(f"Given argument cannot be converted to a numpy.ndarray: {A}.") from e
    assert_array(A, shape=shape, uniform=uniform, ndim=ndim, size=size, dtype=dtype, kind=kind)

    return A


def assert_petsc_real_scalar_type() -> None:
    """Check if PETSc was compiled using `--with-scalar-type=real`."""
    try:
        from petsc4py import PETSc

        if np.isrealobj(PETSc.ScalarType()):
            return
        else:
            raise TypeError(
                "PETSc was compiled with complex scalar type. "
                "Please recompile PETSc with `--with-scalar-type=real` or "
                "provide an alternative `PETSC_ARCH=...` with the correct scalar type."
            )
    except TypeError:
        # a complex scalar type must propagate to the callers, which catch `TypeError` explicitly
        raise
    except Exception as e:
        logging.error(
            f"Unable to determine PETSc's scalar type. Reason: `{e}`. Assuming a real scalar type, "  # noqa: G004
            f"but please ensure that PETSc was compiled using `--with-scalar-type=real`.",
        )
5,942
39.428571
118
py
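A small sketch of `ensure_ndarray_or_sparse` from the module above, mirroring how the rest of the package calls it: coerce the input to an array (sparse matrices pass through unchanged) and assert basic structural properties.

import numpy as np
from pygpcca.utils._checks import ensure_ndarray_or_sparse

# a plain nested list is converted to an ndarray and validated
P = ensure_ndarray_or_sparse([[0.9, 0.1], [0.2, 0.8]], ndim=2, uniform=True, kind="numeric")
print(type(P).__name__)  # ndarray

# a failed check raises an AssertionError
try:
    ensure_ndarray_or_sparse(np.zeros((2, 3)), uniform=True)
except AssertionError as e:
    print(e)  # Given array is not uniform: [2 3]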
pyGPCCA
pyGPCCA-main/pygpcca/utils/__init__.py
from pygpcca.utils._utils import stationary_distribution
57
28
56
py
pyGPCCA
pyGPCCA-main/pygpcca/utils/_docs.py
from docrep import DocstringProcessor

P = """\
P
    The transition matrix (row-stochastic)."""

m = """\
m
    The number of clusters to group into."""

k = """\
k
    The number of eigenvalues and Schur vectors to sort."""

m_optimize = """\
m
    The number of clusters or a range where a search for potentially optimal cluster numbers is performed.
    Valid options are:

        - :class:`int`: number of clusters to group into.
        - :class:`tuple`: minimal and maximal number of clusters.
        - :class:`dict`: minimal and maximal number of clusters given as ``{'m_min': int, 'm_max': int}``."""

z = """\
z
    Specifies which portion of the spectrum is to be sought.
    The subspace returned will be associated with this part of the spectrum.
    Valid options are:

        - 'LM': largest magnitude (default).
        - 'LR': largest real part."""

z_P = """\
z
    Specifies which portion of the eigenvalue spectrum of `P` is to be sought.
    The returned invariant subspace of `P` will be associated with this part of the spectrum.
    Valid options are:

        - 'LM': largest magnitude (default).
        - 'LR': largest real part."""

method = """\
method
    Which method to use to determine the invariant subspace. Valid options are:

        - 'brandts': perform a full Schur decomposition of `P` utilizing :func:`scipy.linalg.schur`
          (without the intrinsic sorting option, since it is flawed) and sort the returned Schur form `R`
          and Schur vector matrix `Q` afterwards using a routine published by Brandts [Brandts02]_.
          This is well tested and thus the default method, although it is also the slowest choice.
        - 'krylov': calculate an orthonormal basis of the subspace associated with the `m` dominant
          eigenvalues of `P` using the Krylov-Schur method as implemented in ``SLEPc``.
          This is the fastest choice and especially suitable for very large `P`,
          but it is still experimental.
    """

tol_krylov = """\
tol_krylov
    The convergence criterion used by ``SLEPc`` internally. This is only relevant if you use
    ``method='krylov'``. If you are dealing with ill-conditioned matrices,
    consider decreasing this value to get accurate results."""

eta = """\
eta
    The input probability distribution of the (micro)states. In theory `eta` can be an arbitrary
    distribution as long as it is a valid probability distribution (i.e., sums up to 1).
    A neutral and valid choice would be the uniform distribution (default).

    In case of a reversible transition matrix, the stationary distribution can (but does not have to)
    be used here. In case of a non-reversible `P`, some initial or average distribution of the states
    might be chosen instead of the uniform distribution.

    Vector of shape `(n,)` which sums to 1."""

chi_ret = r"""An array of shape `(n, m)` containing the membership :math:`\chi_{ij}` (or probability)
of each state :math:`i` (to be assigned) to each cluster :math:`j`. The rows sum up to 1."""

rot_matrix_ret = r"""The optimized rotation matrix :math:`A` of shape `(m, m)` that rotates the dominant
Schur vectors to yield the G-PCCA memberships, i.e., :math:`\chi = X A`."""

crispness_ret = r"""The crispness :math:`\xi \in [0,1]` quantifies the optimality of the solution
(higher is better). It characterizes how crisp (sharp) the decomposition of the state space
into `m` clusters is. It is given via (Eq. 17 from [Roeblitz13]_):

.. math:: \xi = (m - f_{opt}) / m = \mathtt{trace}(S) / m = \mathtt{trace}(\tilde{D} \chi^T D \chi) / m

with :math:`D` being a diagonal matrix with :math:`\eta` on its diagonal.
"""

Q_sort = """A matrix of shape `(n, m)` with ordered `m` dominant Schur vectors in the columns.
The constant Schur vector (being constantly 1) is in the first column."""

R_sort = r"""The ordered top left part of shape `(m, m)` of the real Schur matrix of `P`.
The ordered real partial Schur matrix `R` of `P` fulfills

.. math:: \tilde{P} Q = Q R

with the ordered real matrix of dominant Schur vectors `Q`."""

eigenvalues_m = """An array of shape `(m,)` containing the `m` dominant eigenvalues of `P`."""

eigenvalues_k = """An array of shape `(k,)` containing the `k` dominant eigenvalues of `P`."""

d = DocstringProcessor(
    P=P,
    m=m,
    k=k,
    m_optimize=m_optimize,
    z=z,
    z_P=z_P,
    method=method,
    tol_krylov=tol_krylov,
    eta=eta,
    chi_ret=chi_ret,
    rot_matrix_ret=rot_matrix_ret,
    crispness_ret=crispness_ret,
    Q_sort=Q_sort,
    R_sort=R_sort,
    eigenvalues_m=eigenvalues_m,
    eigenvalues_k=eigenvalues_k,
)
4,660
35.131783
109
py
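A minimal sketch of how the `DocstringProcessor` defined above is consumed elsewhere in the package: decorating a callable with `@d.dedent` substitutes the `%(...)s` placeholders in its docstring with the shared snippets. The function below is hypothetical, introduced purely to illustrate the substitution; it does not exist in the package.

from pygpcca.utils._docs import d

@d.dedent
def my_helper(P, m):  # hypothetical function, used only to demonstrate substitution
    """
    Toy function illustrating docstring substitution.

    Parameters
    ----------
    %(P)s
    %(m)s
    """

print(my_helper.__doc__)  # the placeholders are replaced by the shared parameter docs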