def dist(a, b):
    # Squared Euclidean distance between two equal-length sequences.
    return sum((i - j) ** 2 for i, j in zip(a, b))
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
    """Detect base noun phrases from a dependency parse. Works on both Doc and Span."""
    # fmt: off
    labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
    # fmt: on
    doc = doclike.doc  # Ensure works on both Doc and Span.
    if not doc.has_annotation("DEP"):
        raise ValueError(Errors.E029)
    np_deps = [doc.vocab.strings[label] for label in labels]
    conj = doc.vocab.strings.add("conj")
    np_label = doc.vocab.strings.add("NP")
    prev_end = -1
    for i, word in enumerate(doclike):
        if word.pos not in (NOUN, PROPN, PRON):
            continue
        # Prevent nested chunks from being produced
        if word.left_edge.i <= prev_end:
            continue
        if word.dep in np_deps:
            prev_end = word.right_edge.i
            yield word.left_edge.i, word.right_edge.i + 1, np_label
        elif word.dep == conj:
            head = word.head
            while head.dep == conj and head.head.i < head.i:
                head = head.head
            # If the head is an NP, and we're coordinated to it, we're an NP
            if head.dep in np_deps:
                prev_end = word.right_edge.i
                yield word.left_edge.i, word.right_edge.i + 1, np_label
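# Usage sketch (not from the source): how a noun-chunk iterator like the one
# above is consumed in spaCy. Assumes an installed pipeline with a dependency
# parser, e.g. "en_core_web_sm".
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumps over the lazy dog.")
for chunk in doc.noun_chunks:  # backed by a registered noun_chunks iterator
    print(chunk.text, chunk.root.dep_)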
def __init__(self, leaf):
    self.leaf = leaf
    self.lchild = None
    self.rchild = None

def get_leafs(self):
    if self.lchild is None and self.rchild is None:
        return [self.leaf]
    return self.lchild.get_leafs() + self.rchild.get_leafs()

def get_level(self, level, queue):
    if queue is None:
        queue = []
    if level == 1:
        queue.append(self)  # was queue.push(self): Python lists have no push()
    else:
        if self.lchild is not None:
            self.lchild.get_level(level - 1, queue)
        if self.rchild is not None:
            self.rchild.get_level(level - 1, queue)
    return queue

def paint(self, c):
    self.leaf.paint(c)
    if self.lchild is not None:
        self.lchild.paint(c)
    if self.rchild is not None:
        self.rchild.paint(c)

def __init__(self, x, y, w, h):
    self.x = x
    self.y = y
    self.w = w
    self.h = h
    self.center = (self.x + int(self.w / 2), self.y + int(self.h / 2))
    self.distance_from_center = sqrt(
        (self.center[0] - MAP_WIDTH / 2) ** 2 + (self.center[1] - MAP_HEIGHT / 2) ** 2
    )

def paint(self, c):
    c.stroke_rectangle(self.x, self.y, self.w, self.h)

def draw_path(self, c, container):
    c.path(self.center[0], self.center[1], container.center[0], container.center[1])

def __init__(self, w, h, color="empty"):
    self.board = zeros((h, w), dtype=uint8)
    self.w = w
    self.h = h
    self.set_brush(color)

def set_brush(self, code):
    self.color = self.brushes[code]  # brushes: class-level mapping of color codes (not shown)

def stroke_rectangle(self, x, y, w, h):
    self.line(x, y, w, True)
    self.line(x, y + h - 1, w, True)
    self.line(x, y, h, False)
    self.line(x + w - 1, y, h, False)

def filled_rectangle(self, x, y, w, h):
    self.board[y:y + h, x:x + w] = self.color

def line(self, x, y, length, horizontal):
    if horizontal:
        self.board[y, x:x + length] = self.color
    else:
        self.board[y:y + length, x] = self.color

def path(self, x1, y1, x2, y2):
    # Slices are empty when x2 < x1 or y2 < y1, so normalize the corners first.
    x1, x2 = min(x1, x2), max(x1, x2)
    y1, y2 = min(y1, y2), max(y1, y2)
    self.board[y1:y2 + 1, x1:x2 + 1] = self.color

def circle(self, x, y, r):
    for x_offset in range(-r, r + 1):
        for y_offset in range(-r, r + 1):
            if sqrt(x_offset ** 2 + y_offset ** 2) < r:
                # Index rows with y and columns with x, consistent with the
                # (h, w) board layout used by the other drawing methods.
                self.board[y + y_offset, x + x_offset] = self.color

def draw(self):
    im = Image.fromarray(self.board)
    im.save(MAP_NAME)

def __str__(self):
    return str(self.board)

def __init__(self, container):
    self.x = container.x + randint(1, floor(container.w / 3))
    self.y = container.y + randint(1, floor(container.h / 3))
    self.w = container.w - (self.x - container.x)
    self.h = container.h - (self.y - container.y)
    self.w -= randint(0, floor(self.w / 3))
    self.h -= randint(0, floor(self.h / 3))  # was floor(self.w / 3), an apparent typo
    self.environment = int(min(4, 10 * (container.distance_from_center / MAP_WIDTH) + random() * 2 - 1))
    roll = random() * 0.9 + (2 * container.distance_from_center / MAP_WIDTH) * 0.1
    self.biome = next(n for n, b in enumerate(self.biomes_CDF) if roll < b)

def paint(self, c):
    c.filled_rectangle(self.x, self.y, self.w, self.h)

def _split_vertical(container):
    min_w = int(W_RATIO * container.h) + 1
    if container.w < 2 * min_w:
        return None
    r1 = Container(container.x, container.y, randint(min_w, container.w - min_w), container.h)
    r2 = Container(container.x + r1.w, container.y, container.w - r1.w, container.h)
    return [r1, r2]

def _split_horizontal(container):
    min_h = int(H_RATIO * container.w) + 1
    if container.h < 2 * min_h:
        return None
    r1 = Container(container.x, container.y, container.w, randint(min_h, container.h - min_h))
    r2 = Container(container.x, container.y + r1.h, container.w, container.h - r1.h)
    return [r1, r2]

def split_container(container, iterations):  # renamed from `iter` to avoid shadowing the builtin
    root = Tree(container)
    if iterations != 0:
        sr = random_split(container)
        if sr is not None:
            root.lchild = split_container(sr[0], iterations - 1)
            root.rchild = split_container(sr[1], iterations - 1)
    return root

def draw_paths(c, tree):
    if tree.lchild is None or tree.rchild is None:
        return
    tree.lchild.leaf.draw_path(c, tree.rchild.leaf)
    draw_paths(c, tree.lchild)
    draw_paths(c, tree.rchild)

def init(num_players):
    global MAP_WIDTH, MAP_HEIGHT, N_ITERATIONS, H_RATIO, W_RATIO, MIN_ROOM_SIDE, CENTER_HUB_HOLE, CENTER_HUB_RADIO, MAP_NAME
    MAP_WIDTH = int(500 * sqrt(num_players))
    MAP_HEIGHT = MAP_WIDTH
    N_ITERATIONS = log(MAP_WIDTH * 100, 2)
    H_RATIO = 0.49
    W_RATIO = H_RATIO
    MIN_ROOM_SIDE = 32
    CENTER_HUB_HOLE = 32
    CENTER_HUB_RADIO = CENTER_HUB_HOLE - MIN_ROOM_SIDE / 2
    MAP_NAME = "result%s.png" % MAP_WIDTH
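# Usage sketch (not from the source): how the pieces above compose into a
# map-generation driver. `random_split` is assumed; the original helper that
# chooses between _split_vertical and _split_horizontal is not in this snippet
# set. `Tree` and `Container` are referenced by the code above; `Canvas` and
# `Leaf` are assumed names for the classes holding the other methods, and
# Canvas.brushes / Leaf.biomes_CDF are class attributes assumed to exist.
from random import choice

def random_split(container):
    return choice([_split_vertical, _split_horizontal])(container)

def generate_map(num_players):
    init(num_players)
    canvas = Canvas(MAP_WIDTH, MAP_HEIGHT)
    # N_ITERATIONS is a float (log), so cast before using it as a recursion count.
    tree = split_container(Container(0, 0, MAP_WIDTH, MAP_HEIGHT), int(N_ITERATIONS))
    for container in tree.get_leafs():
        Leaf(container).paint(canvas)
    draw_paths(canvas, tree)
    canvas.draw()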
def get_attn(attn_type):
    if isinstance(attn_type, torch.nn.Module):
        return attn_type
    module_cls = None
    if attn_type is not None:
        if isinstance(attn_type, str):
            attn_type = attn_type.lower()
            # Lightweight attention modules (channel and/or coarse spatial).
            # Typically added to existing network architecture blocks in addition to existing convolutions.
            if attn_type == 'se':
                module_cls = SEModule
            elif attn_type == 'ese':
                module_cls = EffectiveSEModule
            elif attn_type == 'eca':
                module_cls = EcaModule
            elif attn_type == 'ecam':
                module_cls = partial(EcaModule, use_mlp=True)
            elif attn_type == 'ceca':
                module_cls = CecaModule
            elif attn_type == 'ge':
                module_cls = GatherExcite
            elif attn_type == 'gc':
                module_cls = GlobalContext
            elif attn_type == 'gca':
                module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False)
            elif attn_type == 'cbam':
                module_cls = CbamModule
            elif attn_type == 'lcbam':
                module_cls = LightCbamModule
            # Attention / attention-like modules w/ significant params
            # Typically replace some of the existing workhorse convs in a network architecture.
            # All of these accept a stride argument and can spatially downsample the input.
            elif attn_type == 'sk':
                module_cls = SelectiveKernel
            elif attn_type == 'splat':
                module_cls = SplitAttn
            # Self-attention / attention-like modules w/ significant compute and/or params
            # Typically replace some of the existing workhorse convs in a network architecture.
            # All of these accept a stride argument and can spatially downsample the input.
            elif attn_type == 'lambda':
                return LambdaLayer
            elif attn_type == 'bottleneck':
                return BottleneckAttn
            elif attn_type == 'halo':
                return HaloAttn
            elif attn_type == 'nl':
                module_cls = NonLocalAttn
            elif attn_type == 'bat':
                module_cls = BatNonLocalAttn
            # Woops!
            else:
                assert False, "Invalid attn module (%s)" % attn_type
        elif isinstance(attn_type, bool):
            if attn_type:
                module_cls = SEModule
        else:
            module_cls = attn_type
    return module_cls
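# Usage sketch (not from the source): get_attn resolves a name to an attention
# layer class from timm's registry. Instantiating with a channel count is
# assumed from SEModule's signature.
attn_cls = get_attn('se')                       # -> SEModule (a class), or None
attn_layer = attn_cls(64) if attn_cls else None  # 64 input channels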
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        region=dict(required=True, aliases=['aws_region', 'ec2_region']),
        owner=dict(required=False, default=None),
        ami_id=dict(required=False),
        ami_tags=dict(required=False, type='dict', aliases=['search_tags', 'image_tags']),
        architecture=dict(required=False),
        hypervisor=dict(required=False),
        is_public=dict(required=False),
        name=dict(required=False),
        platform=dict(required=False),
        sort=dict(required=False, default=None, choices=['name', 'description', 'tag']),
        sort_tag=dict(required=False),
        sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
        sort_start=dict(required=False),
        sort_end=dict(required=False),
        state=dict(required=False, default='available'),
        virtualization_type=dict(required=False),
        no_result_action=dict(required=False, default='success', choices=['success', 'fail']),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module; install it via pip or your package manager')

    ami_id = module.params.get('ami_id')
    ami_tags = module.params.get('ami_tags')
    architecture = module.params.get('architecture')
    hypervisor = module.params.get('hypervisor')
    is_public = module.params.get('is_public')
    name = module.params.get('name')
    owner = module.params.get('owner')
    platform = module.params.get('platform')
    sort = module.params.get('sort')
    sort_tag = module.params.get('sort_tag')
    sort_order = module.params.get('sort_order')
    sort_start = module.params.get('sort_start')
    sort_end = module.params.get('sort_end')
    state = module.params.get('state')
    virtualization_type = module.params.get('virtualization_type')
    no_result_action = module.params.get('no_result_action')

    filters = {'state': state}  # renamed from `filter` to avoid shadowing the builtin

    if ami_id:
        filters['image_id'] = ami_id
    if ami_tags:
        for tag in ami_tags:
            filters['tag:' + tag] = ami_tags[tag]
    if architecture:
        filters['architecture'] = architecture
    if hypervisor:
        filters['hypervisor'] = hypervisor
    if is_public:
        filters['is_public'] = is_public
    if name:
        filters['name'] = name
    if platform:
        filters['platform'] = platform
    if virtualization_type:
        filters['virtualization_type'] = virtualization_type

    ec2 = ec2_connect(module)
    images_result = ec2.get_all_images(owners=owner, filters=filters)

    if no_result_action == 'fail' and len(images_result) == 0:
        module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filters))

    results = []
    for image in images_result:
        data = {
            'ami_id': image.id,
            'architecture': image.architecture,
            'description': image.description,
            'is_public': image.is_public,
            'name': image.name,
            'owner_id': image.owner_id,
            'platform': image.platform,
            'root_device_name': image.root_device_name,
            'root_device_type': image.root_device_type,
            'state': image.state,
            'tags': image.tags,
            'virtualization_type': image.virtualization_type,
        }
        if image.kernel_id:
            data['kernel_id'] = image.kernel_id
        if image.ramdisk_id:
            data['ramdisk_id'] = image.ramdisk_id
        results.append(data)

    if sort == 'tag':
        if not sort_tag:
            module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
        results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order == 'descending'))
    elif sort:
        results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))

    try:
        if sort and sort_start and sort_end:
            results = results[int(sort_start):int(sort_end)]
        elif sort and sort_start:
            results = results[int(sort_start):]
        elif sort and sort_end:
            results = results[:int(sort_end)]
    except TypeError:
        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")

    module.exit_json(results=results)
def start(self, action_name: str) -> None:
    """Defines how to start recording an action."""

def install_secret_key(app, filename='secret_key'):
    """Configure the SECRET_KEY from a file in the instance directory.

    If the file does not exist, print instructions to create it from a
    shell with a random key, then exit.
    """
    filename = os.path.join(app.instance_path, filename)
    try:
        app.config['SECRET_KEY'] = open(filename, 'rb').read()
    except IOError:
        print('Error: No secret key. Create it with:')
        full_path = os.path.dirname(filename)
        if not os.path.isdir(full_path):
            print('mkdir -p {filename}'.format(filename=full_path))
        print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))
        sys.exit(1)

def stop(self, action_name: str) -> None:
    """Defines how to record the duration once an action is complete."""

def not_found(error):
    return render_template('404.html'), 404

def summary(self) -> str:
    """Create profiler summary in text format."""

def after_request(response):
    response.headers.add('X-Test', 'This is only test.')
    response.headers.add('Access-Control-Allow-Origin', '*')  # TODO: set to real origin
    return response

def setup(self, **kwargs: Any) -> None:
    """Execute arbitrary pre-profiling set-up steps as defined by subclass."""

def teardown(self, **kwargs: Any) -> None:
    """Execute arbitrary post-profiling tear-down steps as defined by subclass."""
def __init__(
    self,
    dirpath: Optional[Union[str, Path]] = None,
    filename: Optional[str] = None,
) -> None:
    self.dirpath = dirpath
    self.filename = filename
    self._output_file: Optional[TextIO] = None
    self._write_stream: Optional[Callable] = None
    self._local_rank: Optional[int] = None
    self._log_dir: Optional[str] = None
    self._stage: Optional[str] = None
# Assumed: the contextlib.contextmanager decorator, required for the
# documented `with self.profile(...)` usage below.
@contextmanager
def profile(self, action_name: str) -> Generator:
    """Yields a context manager to encapsulate the scope of a profiled action.

    Example::

        with self.profile('load training data'):
            # load training data code

    The profiler will start once you've entered the context and will
    automatically stop once you exit the code block.
    """
    try:
        self.start(action_name)
        yield action_name
    finally:
        self.stop(action_name)
def profile_iterable(self, iterable: Iterable, action_name: str) -> Generator:
    iterator = iter(iterable)
    while True:
        try:
            self.start(action_name)
            value = next(iterator)
            self.stop(action_name)
            yield value
        except StopIteration:
            self.stop(action_name)
            break

def _rank_zero_info(self, *args, **kwargs) -> None:
    if self._local_rank in (None, 0):
        log.info(*args, **kwargs)

def _prepare_filename(
    self, action_name: Optional[str] = None, extension: str = ".txt", split_token: str = "-"
) -> str:
    args = []
    if self._stage is not None:
        args.append(self._stage)
    if self.filename:
        args.append(self.filename)
    if self._local_rank is not None:
        args.append(str(self._local_rank))
    if action_name is not None:
        args.append(action_name)
    filename = split_token.join(args) + extension
    return filename

def _prepare_streams(self) -> None:
    if self._write_stream is not None:
        return
    if self.filename:
        filepath = os.path.join(self.dirpath, self._prepare_filename())
        fs = get_filesystem(filepath)
        file = fs.open(filepath, "a")
        self._output_file = file
        self._write_stream = file.write
    else:
        self._write_stream = self._rank_zero_info

def describe(self) -> None:
    """Logs a profile report after the conclusion of run."""
    # there are pickling issues with open file handles in Python 3.6
    # so to avoid them, we open and close the files within this function
    # by calling `_prepare_streams` and `teardown`
    self._prepare_streams()
    summary = self.summary()
    if summary:
        self._write_stream(summary)
    if self._output_file is not None:
        self._output_file.flush()
    self.teardown(stage=self._stage)

def _stats_to_str(self, stats: Dict[str, str]) -> str:
    stage = f"{self._stage.upper()} " if self._stage is not None else ""
    output = [stage + "Profiler Report"]
    for action, value in stats.items():
        header = f"Profile stats for: {action}"
        if self._local_rank is not None:
            header += f" rank: {self._local_rank}"
        output.append(header)
        output.append(value)
    return os.linesep.join(output)

def setup(
    self, stage: Optional[str] = None, local_rank: Optional[int] = None, log_dir: Optional[str] = None
) -> None:
    """Execute arbitrary pre-profiling set-up steps."""
    self._stage = stage
    self._local_rank = local_rank
    self._log_dir = log_dir
    self.dirpath = self.dirpath or log_dir

def teardown(self, stage: Optional[str] = None) -> None:
    """Execute arbitrary post-profiling tear-down steps.

    Closes the currently open file and stream.
    """
    self._write_stream = None
    if self._output_file is not None:
        self._output_file.close()
        self._output_file = None  # can't pickle TextIOWrapper

def __del__(self) -> None:
    self.teardown(stage=self._stage)

def start(self, action_name: str) -> None:
    raise NotImplementedError

def stop(self, action_name: str) -> None:
    raise NotImplementedError

def summary(self) -> str:
    raise NotImplementedError

def local_rank(self) -> int:
    return 0 if self._local_rank is None else self._local_rank

def start(self, action_name: str) -> None:
    pass

def stop(self, action_name: str) -> None:
    pass
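# Sketch (not from the source): a minimal concrete profiler implementing the
# start/stop/summary contract defined by the stubs above, using wall-clock
# time. Standalone for illustration; a real subclass would also inherit the
# base profiler's setup/teardown and file-stream handling.
import time
from collections import defaultdict

class WallClockProfiler:
    def __init__(self):
        self._starts = {}
        self._totals = defaultdict(float)

    def start(self, action_name: str) -> None:
        self._starts[action_name] = time.monotonic()

    def stop(self, action_name: str) -> None:
        self._totals[action_name] += time.monotonic() - self._starts.pop(action_name)

    def summary(self) -> str:
        return "\n".join(f"{name}: {total:.3f}s" for name, total in sorted(self._totals.items()))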
def __init__(self, model, data):
    # try and import pytorch
    global torch
    if torch is None:
        import torch
        if version.parse(torch.__version__) < version.parse("0.4"):
            warnings.warn("Your PyTorch version is older than 0.4 and not supported.")

    # check if we have multiple inputs
    self.multi_input = False
    if type(data) == list:
        self.multi_input = True
    if type(data) != list:
        data = [data]
    self.data = data
    self.layer = None
    self.input_handle = None
    self.interim = False
    self.interim_inputs_shape = None
    self.expected_value = None  # to keep the DeepExplainer base happy
    if type(model) == tuple:
        self.interim = True
        model, layer = model
        model = model.eval()
        self.layer = layer
        self.add_target_handle(self.layer)

        # if we are taking an interim layer, the 'data' is going to be the input
        # of the interim layer; we will capture this using a forward hook
        with torch.no_grad():
            _ = model(*data)
            interim_inputs = self.layer.target_input
            if type(interim_inputs) is tuple:
                # this should always be true, but just to be safe
                self.interim_inputs_shape = [i.shape for i in interim_inputs]
            else:
                self.interim_inputs_shape = [interim_inputs.shape]
        self.target_handle.remove()
        del self.layer.target_input
    self.model = model.eval()

    self.multi_output = False
    self.num_outputs = 1
    with torch.no_grad():
        outputs = model(*data)

        # also get the device everything is running on
        self.device = outputs.device
        if outputs.shape[1] > 1:
            self.multi_output = True
            self.num_outputs = outputs.shape[1]
        self.expected_value = outputs.mean(0).cpu().numpy()

def __init__(self, reddit, term, config, oauth, url=None, submission=None):
    super(SubmissionPage, self).__init__(reddit, term, config, oauth)

    self.controller = SubmissionController(self, keymap=config.keymap)
    if url:
        self.content = SubmissionContent.from_url(
            reddit, url, term.loader,
            max_comment_cols=config['max_comment_cols'])
    else:
        self.content = SubmissionContent(
            submission, term.loader,
            max_comment_cols=config['max_comment_cols'])
    # Start at the submission post, which is indexed as -1
    self.nav = Navigator(self.content.get, page_index=-1)
    self.selected_subreddit = None

def add_target_handle(self, layer):
    input_handle = layer.register_forward_hook(get_target_input)
    self.target_handle = input_handle

def toggle_comment(self):
    "Toggle the selected comment tree between visible and hidden"
    current_index = self.nav.absolute_index
    self.content.toggle(current_index)

    # This logic handles a display edge case after a comment toggle. We
    # want to make sure that when we re-draw the page, the cursor stays at
    # its current absolute position on the screen. In order to do this,
    # apply a fixed offset if, while inverted, we either try to hide the
    # bottom comment or toggle any of the middle comments.
    if self.nav.inverted:
        data = self.content.get(current_index)
        if data['hidden'] or self.nav.cursor_index != 0:
            window = self._subwindows[-1][0]
            n_rows, _ = window.getmaxyx()
            self.nav.flip(len(self._subwindows) - 1)
            self.nav.top_item_height = n_rows

def add_handles(self, model, forward_handle, backward_handle):
    """
    Add handles to all non-container layers in the model.
    Recursively for non-container layers
    """
    handles_list = []
    model_children = list(model.children())
    if model_children:
        for child in model_children:
            handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
    else:  # leaves
        handles_list.append(model.register_forward_hook(forward_handle))
        handles_list.append(model.register_backward_hook(backward_handle))
    return handles_list

def exit_submission(self):
    "Close the submission and return to the subreddit page"
    self.active = False

def remove_attributes(self, model):
    """
    Removes the x and y attributes which were added by the forward handles
    Recursively searches for non-container layers
    """
    for child in model.children():
        if 'nn.modules.container' in str(type(child)):
            self.remove_attributes(child)
        else:
            try:
                del child.x
            except AttributeError:
                pass
            try:
                del child.y
            except AttributeError:
                pass

def refresh_content(self, order=None, name=None):
    "Re-download comments and reset the page index"
    order = order or self.content.order
    url = name or self.content.name

    with self.term.loader('Refreshing page'):
        self.content = SubmissionContent.from_url(
            self.reddit, url, self.term.loader, order=order,
            max_comment_cols=self.config['max_comment_cols'])
    if not self.term.loader.exception:
        self.nav = Navigator(self.content.get, page_index=-1)

def gradient(self, idx, inputs):
    self.model.zero_grad()
    X = [x.requires_grad_() for x in inputs]
    outputs = self.model(*X)
    selected = [val for val in outputs[:, idx]]
    grads = []
    if self.interim:
        interim_inputs = self.layer.target_input
        for idx, input in enumerate(interim_inputs):
            grad = torch.autograd.grad(
                selected, input,
                retain_graph=True if idx + 1 < len(interim_inputs) else None,
                allow_unused=True)[0]
            if grad is not None:
                grad = grad.cpu().numpy()
            else:
                grad = torch.zeros_like(X[idx]).cpu().numpy()
            grads.append(grad)
        del self.layer.target_input
        return grads, [i.detach().cpu().numpy() for i in interim_inputs]
    else:
        for idx, x in enumerate(X):
            grad = torch.autograd.grad(
                selected, x,
                retain_graph=True if idx + 1 < len(X) else None,
                allow_unused=True)[0]
            if grad is not None:
                grad = grad.cpu().numpy()
            else:
                grad = torch.zeros_like(X[idx]).cpu().numpy()
            grads.append(grad)
        return grads

def prompt_subreddit(self):
    "Open a prompt to navigate to a different subreddit"
    name = self.term.prompt_input('Enter page: /')
    if name is not None:
        with self.term.loader('Loading page'):
            content = SubredditContent.from_name(
                self.reddit, name, self.term.loader)
        if not self.term.loader.exception:
            self.selected_subreddit = content
            self.active = False

def shap_values(self, X, ranked_outputs=None, output_rank_order="max", check_additivity=False):
    # X ~ self.model_input
    # X_data ~ self.data

    # check if we have multiple inputs
    if not self.multi_input:
        assert type(X) != list, "Expected a single tensor model input!"
        X = [X]
    else:
        assert type(X) == list, "Expected a list of model inputs!"
    X = [x.detach().to(self.device) for x in X]

    if ranked_outputs is not None and self.multi_output:
        with torch.no_grad():
            model_output_values = self.model(*X)
        # rank and determine the model outputs that we will explain
        if output_rank_order == "max":
            _, model_output_ranks = torch.sort(model_output_values, descending=True)
        elif output_rank_order == "min":
            _, model_output_ranks = torch.sort(model_output_values, descending=False)
        elif output_rank_order == "max_abs":
            _, model_output_ranks = torch.sort(torch.abs(model_output_values), descending=True)
        else:
            assert False, "output_rank_order must be max, min, or max_abs!"
        model_output_ranks = model_output_ranks[:, :ranked_outputs]
    else:
        model_output_ranks = (torch.ones((X[0].shape[0], self.num_outputs)).int() *
                              torch.arange(0, self.num_outputs).int())

    # add the gradient handles
    handles = self.add_handles(self.model, add_interim_values, deeplift_grad)
    if self.interim:
        self.add_target_handle(self.layer)

    # compute the attributions
    output_phis = []
    for i in range(model_output_ranks.shape[1]):
        phis = []
        if self.interim:
            for k in range(len(self.interim_inputs_shape)):
                phis.append(np.zeros((X[0].shape[0], ) + self.interim_inputs_shape[k][1:]))
        else:
            for k in range(len(X)):
                phis.append(np.zeros(X[k].shape))
        for j in range(X[0].shape[0]):
            # tile the inputs to line up with the background data samples
            tiled_X = [X[l][j:j + 1].repeat(
                           (self.data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape) - 1)]))
                       for l in range(len(X))]
            joint_x = [torch.cat((tiled_X[l], self.data[l]), dim=0) for l in range(len(X))]
            # run attribution computation graph
            feature_ind = model_output_ranks[j, i]
            sample_phis = self.gradient(feature_ind, joint_x)
            # assign the attributions to the right part of the output arrays
            if self.interim:
                sample_phis, output = sample_phis
                x, data = [], []
                for k in range(len(output)):
                    x_temp, data_temp = np.split(output[k], 2)
                    x.append(x_temp)
                    data.append(data_temp)
                for l in range(len(self.interim_inputs_shape)):
                    phis[l][j] = (sample_phis[l][self.data[l].shape[0]:] * (x[l] - data[l])).mean(0)
            else:
                for l in range(len(X)):
                    phis[l][j] = (torch.from_numpy(sample_phis[l][self.data[l].shape[0]:]).to(self.device) *
                                  (X[l][j:j + 1] - self.data[l])).cpu().detach().numpy().mean(0)
        output_phis.append(phis[0] if not self.multi_input else phis)

    # cleanup; remove all gradient handles
    for handle in handles:
        handle.remove()
    self.remove_attributes(self.model)
    if self.interim:
        self.target_handle.remove()

    if not self.multi_output:
        return output_phis[0]
    elif ranked_outputs is not None:
        return output_phis, model_output_ranks
    else:
        return output_phis
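# Usage sketch (not from the source): the public entry point for the PyTorch
# deep explainer above is shap.DeepExplainer, which dispatches to this
# implementation. `model`, `background`, and `test_batch` are placeholders.
import shap

explainer = shap.DeepExplainer(model, background)  # background: a batch of training samples
shap_values = explainer.shap_values(test_batch)    # one attribution array per model output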
def open_link(self):
    "Open the selected item with the webbrowser"
    data = self.get_selected_item()
    url = data.get('permalink')
    if url:
        self.term.open_browser(url)
    else:
        self.term.flash()

def deeplift_grad(module, grad_input, grad_output):
    """The backward hook which computes the deeplift gradient for an nn.Module"""
    # first, get the module type
    module_type = module.__class__.__name__
    # then, check that the module is supported
    if module_type in op_handler:
        if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:
            return op_handler[module_type](module, grad_input, grad_output)
    else:
        print('Warning: unrecognized nn.Module: {}'.format(module_type))
        return grad_input

def open_pager(self):
    "Open the selected item with the system's pager"
    data = self.get_selected_item()
    if data['type'] == 'Submission':
        text = '\n\n'.join((data['permalink'], data['text']))
        self.term.open_pager(text)
    elif data['type'] == 'Comment':
        text = '\n\n'.join((data['permalink'], data['body']))
        self.term.open_pager(text)
    else:
        self.term.flash()

def add_interim_values(module, input, output):
    """The forward hook used to save interim tensors, detached from the graph.
    Used to calculate the multipliers
    """
    try:
        del module.x
    except AttributeError:
        pass
    try:
        del module.y
    except AttributeError:
        pass
    module_type = module.__class__.__name__
    if module_type in op_handler:
        func_name = op_handler[module_type].__name__
        # First, check for cases where we don't need to save the x and y tensors
        if func_name == 'passthrough':
            pass
        else:
            # check only the 0th input varies
            for i in range(len(input)):
                if i != 0 and type(output) is tuple:
                    assert input[i] == output[i], "Only the 0th input may vary!"
            # if a new method is added, it must be added here too. This ensures tensors
            # are only saved if necessary
            if func_name in ['maxpool', 'nonlinear_1d']:
                # only save tensors if necessary
                if type(input) is tuple:
                    setattr(module, 'x', torch.nn.Parameter(input[0].detach()))
                else:
                    setattr(module, 'x', torch.nn.Parameter(input.detach()))
                if type(output) is tuple:
                    setattr(module, 'y', torch.nn.Parameter(output[0].detach()))
                else:
                    setattr(module, 'y', torch.nn.Parameter(output.detach()))
    if module_type in failure_case_modules:
        input[0].register_hook(deeplift_tensor_grad)
def add_comment(self):
    """
    Submit a reply to the selected item.

    Selected item:
        Submission - add a top level comment
        Comment - add a comment reply
    """
    data = self.get_selected_item()
    if data['type'] == 'Submission':
        body = data['text']
        reply = data['object'].add_comment
    elif data['type'] == 'Comment':
        body = data['body']
        reply = data['object'].reply
    else:
        self.term.flash()
        return

    # Construct the text that will be displayed in the editor file.
    # The post body will be commented out and added for reference
    lines = ['# |' + line for line in body.split('\n')]
    content = '\n'.join(lines)
    comment_info = docs.COMMENT_FILE.format(
        author=data['author'],
        type=data['type'].lower(),
        content=content)

    with self.term.open_editor(comment_info) as comment:
        if not comment:
            self.term.show_notification('Canceled')
            return
        with self.term.loader('Posting', delay=0):
            reply(comment)
            # Give reddit time to process the submission
            time.sleep(2.0)
        if self.term.loader.exception is None:
            self.refresh_content()
        else:
            raise TemporaryFileError()

def get_target_input(module, input, output):
    """A forward hook which saves the tensor - attached to its graph.
    Used if we want to explain the interim outputs of a model
    """
    try:
        del module.target_input
    except AttributeError:
        pass
    setattr(module, 'target_input', input)

def delete_comment(self):
    "Delete the selected comment"
    if self.get_selected_item()['type'] == 'Comment':
        self.delete_item()
    else:
        self.term.flash()

def deeplift_tensor_grad(grad):
    return_grad = complex_module_gradients[-1]
    del complex_module_gradients[-1]
    return return_grad

def comment_urlview(self):
    data = self.get_selected_item()
    comment = data.get('body') or data.get('text') or data.get('url_full')
    if comment:
        self.term.open_urlview(comment)
    else:
        self.term.flash()

def passthrough(module, grad_input, grad_output):
    """No change made to gradients"""
    return None

def _draw_item(self, win, data, inverted):
    if data['type'] == 'MoreComments':
        return self._draw_more_comments(win, data)
    elif data['type'] == 'HiddenComment':
        return self._draw_more_comments(win, data)
    elif data['type'] == 'Comment':
        return self._draw_comment(win, data, inverted)
    else:
        return self._draw_submission(win, data)
def maxpool(module, grad_input, grad_output):
    pool_to_unpool = {
        'MaxPool1d': torch.nn.functional.max_unpool1d,
        'MaxPool2d': torch.nn.functional.max_unpool2d,
        'MaxPool3d': torch.nn.functional.max_unpool3d
    }
    pool_to_function = {
        'MaxPool1d': torch.nn.functional.max_pool1d,
        'MaxPool2d': torch.nn.functional.max_pool2d,
        'MaxPool3d': torch.nn.functional.max_pool3d
    }
    delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]
    dup0 = [2] + [1 for i in delta_in.shape[1:]]
    # we also need to check if the output is a tuple
    y, ref_output = torch.chunk(module.y, 2)
    cross_max = torch.max(y, ref_output)
    diffs = torch.cat([cross_max - ref_output, y - cross_max], 0)

    # all of this just to unpool the outputs
    with torch.no_grad():
        _, indices = pool_to_function[module.__class__.__name__](
            module.x, module.kernel_size, module.stride, module.padding,
            module.dilation, module.ceil_mode, True)
        xmax_pos, rmax_pos = torch.chunk(pool_to_unpool[module.__class__.__name__](
            grad_output[0] * diffs, indices, module.kernel_size, module.stride,
            module.padding, list(module.x.shape)), 2)

    org_input_shape = grad_input[0].shape  # for the maxpool 1d
    grad_input = [None for _ in grad_input]
    grad_input[0] = torch.where(torch.abs(delta_in) < 1e-7, torch.zeros_like(delta_in),
                                (xmax_pos + rmax_pos) / delta_in).repeat(dup0)
    if module.__class__.__name__ == 'MaxPool1d':
        complex_module_gradients.append(grad_input[0])
        # the grad input that is returned doesn't matter, since it will immediately
        # be overridden by the grad in the complex_module_gradient
        grad_input[0] = torch.ones(org_input_shape)
    return tuple(grad_input)

def _draw_comment(self, win, data, inverted):
    n_rows, n_cols = win.getmaxyx()
    n_cols -= 1

    # Handle the case where the window is not large enough to fit the text.
    valid_rows = range(0, n_rows)
    offset = 0 if not inverted else -(data['n_rows'] - n_rows)

    # If there isn't enough space to fit the comment body on the screen,
    # replace the last line with a notification.
    split_body = data['split_body']
    if data['n_rows'] > n_rows:
        # Only when there is a single comment on the page and not inverted
        if not inverted and len(self._subwindows) == 0:
            cutoff = data['n_rows'] - n_rows + 1
            split_body = split_body[:-cutoff]
            split_body.append('(Not enough space to display)')

    row = offset
    if row in valid_rows:
        attr = curses.A_BOLD
        attr |= (Color.BLUE if not data['is_author'] else Color.GREEN)
        self.term.add_line(win, '{author} '.format(**data), row, 1, attr)

        if data['flair']:
            attr = curses.A_BOLD | Color.YELLOW
            self.term.add_line(win, '{flair} '.format(**data), attr=attr)

        text, attr = self.term.get_arrow(data['likes'])
        self.term.add_line(win, text, attr=attr)
        self.term.add_line(win, ' {score} {created} '.format(**data))

        if data['gold']:
            text, attr = self.term.guilded
            self.term.add_line(win, text, attr=attr)

        if data['stickied']:
            text, attr = '[stickied]', Color.GREEN
            self.term.add_line(win, text, attr=attr)

        if data['saved']:
            text, attr = '[saved]', Color.GREEN
            self.term.add_line(win, text, attr=attr)

    for row, text in enumerate(split_body, start=offset + 1):
        if row in valid_rows:
            self.term.add_line(win, text, row, 1)

    # Unfortunately vline() doesn't support custom color so we have to
    # build it one segment at a time.
    attr = Color.get_level(data['level'])
    x = 0
    for y in range(n_rows):
        self.term.addch(win, y, x, self.term.vline, attr)

    return attr | self.term.vline

def linear_1d(module, grad_input, grad_output):
    """No change made to gradients."""
    return None

def _draw_more_comments(self, win, data):
    n_rows, n_cols = win.getmaxyx()
    n_cols -= 1

    self.term.add_line(win, '{body}'.format(**data), 0, 1)
    self.term.add_line(
        win, ' [{count}]'.format(**data), attr=curses.A_BOLD)

    attr = Color.get_level(data['level'])
    self.term.addch(win, 0, 0, self.term.vline, attr)

    return attr | self.term.vline

def nonlinear_1d(module, grad_input, grad_output):
    delta_out = module.y[: int(module.y.shape[0] / 2)] - module.y[int(module.y.shape[0] / 2):]
    delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]
    dup0 = [2] + [1 for i in delta_in.shape[1:]]
    # handles numerical instabilities where delta_in is very small by
    # just taking the gradient in those cases
    grads = [None for _ in grad_input]
    grads[0] = torch.where(torch.abs(delta_in.repeat(dup0)) < 1e-6, grad_input[0],
                           grad_output[0] * (delta_out / delta_in).repeat(dup0))
    return tuple(grads)
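# Worked example (not from the source): nonlinear_1d implements DeepLIFT's
# rescale rule, replacing the local gradient with delta_out / delta_in
# between each sample and its paired reference. For a ReLU:
import torch

x, x_ref = torch.tensor(2.0), torch.tensor(-1.0)
delta_in = x - x_ref                           # 3.0
delta_out = torch.relu(x) - torch.relu(x_ref)  # 2.0
multiplier = delta_out / delta_in              # 2/3 replaces relu'(x) = 1.0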
def wait_time_gen():
    # Relies on `interval`, `start`, and `loop` from the enclosing scope.
    count = 0
    while True:
        rand = random.randrange(round(interval.total_seconds()))
        tmp = round(start + interval.total_seconds() * count + rand - loop.time())
        yield tmp
        count += 1

def every_day(job, loop=None):
    return every(job, timedelta=timedelta(days=1), loop=loop)

def every_week(job, loop=None):
    return every(job, timedelta=timedelta(days=7), loop=loop)

def _nearest_weekday(weekday):
    return datetime.now() + timedelta(days=(weekday - datetime.now().weekday()) % 7)

def _every_weekday(job, weekday, loop=None):
    return every(job, timedelta=timedelta(days=7), start_at=_nearest_weekday(weekday), loop=loop)

def test_dummy_request():
    from rasa.nlu.emulators.no_emulator import NoEmulator

    em = NoEmulator()
    norm = em.normalise_request_json({"text": ["arb text"]})
    assert norm == {"text": "arb text", "time": None}

    norm = em.normalise_request_json({"text": ["arb text"], "time": "1499279161658"})
    assert norm == {"text": "arb text", "time": "1499279161658"}

def __init__(self):
    ApiCli.__init__(self)
    self.path = "v1/account/sources/"
    self.method = "GET"

def test_dummy_response():
    from rasa.nlu.emulators.no_emulator import NoEmulator

    em = NoEmulator()
    data = {"intent": "greet", "text": "hi", "entities": {}, "confidence": 1.0}
    assert em.normalise_response_json(data) == data
def __init__(self, root, transforms=None):
    super().__init__(root=root)
    self.transforms = transforms
    self._flow_list = []
    self._image_list = []

def _read_img(self, file_name):
    img = Image.open(file_name)
    if img.mode != "RGB":
        img = img.convert("RGB")
    return img

def _read_flow(self, file_name):
    # Return the flow or a tuple with the flow and the valid_flow_mask
    # if _has_builtin_flow_mask is True
    pass

def __getitem__(self, index):
    img1 = self._read_img(self._image_list[index][0])
    img2 = self._read_img(self._image_list[index][1])

    if self._flow_list:  # it will be empty for some dataset when split="test"
        flow = self._read_flow(self._flow_list[index])
        if self._has_builtin_flow_mask:
            flow, valid_flow_mask = flow
        else:
            valid_flow_mask = None
    else:
        flow = valid_flow_mask = None

    if self.transforms is not None:
        img1, img2, flow, valid_flow_mask = self.transforms(img1, img2, flow, valid_flow_mask)

    if self._has_builtin_flow_mask or valid_flow_mask is not None:
        # The `or valid_flow_mask is not None` part is here because the mask
        # can be generated within a transform
        return img1, img2, flow, valid_flow_mask
    else:
        return img1, img2, flow

def __len__(self):
    return len(self._image_list)

def __rmul__(self, v):
    return torch.utils.data.ConcatDataset([self] * v)

def __init__(self, root, split="train", pass_name="clean", transforms=None):
    super().__init__(root=root, transforms=transforms)

    verify_str_arg(split, "split", valid_values=("train", "test"))
    verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
    passes = ["clean", "final"] if pass_name == "both" else [pass_name]

    root = Path(root) / "Sintel"
    flow_root = root / "training" / "flow"

    for pass_name in passes:
        split_dir = "training" if split == "train" else split
        image_root = root / split_dir / pass_name

        for scene in os.listdir(image_root):
            image_list = sorted(glob(str(image_root / scene / "*.png")))
            for i in range(len(image_list) - 1):
                self._image_list += [[image_list[i], image_list[i + 1]]]

            if split == "train":
                self._flow_list += sorted(glob(str(flow_root / scene / "*.flo")))

def __getitem__(self, index):
    """Return example at given index.

    Args:
        index(int): The index of the example to retrieve

    Returns:
        tuple: A 3-tuple with ``(img1, img2, flow)``. The flow is a numpy
        array of shape (2, H, W) and the images are PIL images. ``flow`` is
        None if ``split="test"``. If a valid flow mask is generated within
        the ``transforms`` parameter, a 4-tuple with
        ``(img1, img2, flow, valid_flow_mask)`` is returned.
    """
    return super().__getitem__(index)

def _read_flow(self, file_name):
    return _read_flo(file_name)

def __init__(self, root, split="train", transforms=None):
    super().__init__(root=root, transforms=transforms)

    verify_str_arg(split, "split", valid_values=("train", "test"))
    root = Path(root) / "KittiFlow" / (split + "ing")
    images1 = sorted(glob(str(root / "image_2" / "*_10.png")))
    images2 = sorted(glob(str(root / "image_2" / "*_11.png")))

    if not images1 or not images2:
        raise FileNotFoundError(
            "Could not find the Kitti flow images. Please make sure the directory structure is correct."
        )

    for img1, img2 in zip(images1, images2):
        self._image_list += [[img1, img2]]

    if split == "train":
        self._flow_list = sorted(glob(str(root / "flow_occ" / "*_10.png")))

def __getitem__(self, index):
    """Return example at given index.

    Args:
        index(int): The index of the example to retrieve

    Returns:
        tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` where
        ``valid_flow_mask`` is a numpy boolean mask of shape (H, W)
        indicating which flow values are valid. The flow is a numpy array of
        shape (2, H, W) and the images are PIL images. ``flow`` and
        ``valid_flow_mask`` are None if ``split="test"``.
    """
    return super().__getitem__(index)

def _read_flow(self, file_name):
    return _read_16bits_png_with_flow_and_valid_mask(file_name)

def __init__(self, root, split="train", transforms=None):
    super().__init__(root=root, transforms=transforms)

    verify_str_arg(split, "split", valid_values=("train", "val"))

    root = Path(root) / "FlyingChairs"
    images = sorted(glob(str(root / "data" / "*.ppm")))
    flows = sorted(glob(str(root / "data" / "*.flo")))

    split_file_name = "FlyingChairs_train_val.txt"

    if not os.path.exists(root / split_file_name):
        raise FileNotFoundError(
            "The FlyingChairs_train_val.txt file was not found - "
            "please download it from the dataset page (see docstring)."
        )

    split_list = np.loadtxt(str(root / split_file_name), dtype=np.int32)
    for i in range(len(flows)):
        split_id = split_list[i]
        if (split == "train" and split_id == 1) or (split == "val" and split_id == 2):
            self._flow_list += [flows[i]]
            self._image_list += [[images[2 * i], images[2 * i + 1]]]

def __init__(self, root, split="train", pass_name="clean", camera="left", transforms=None):
    super().__init__(root=root, transforms=transforms)

    verify_str_arg(split, "split", valid_values=("train", "test"))
    split = split.upper()

    verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
    passes = {
        "clean": ["frames_cleanpass"],
        "final": ["frames_finalpass"],
        "both": ["frames_cleanpass", "frames_finalpass"],
    }[pass_name]

    verify_str_arg(camera, "camera", valid_values=("left", "right", "both"))
    cameras = ["left", "right"] if camera == "both" else [camera]

    root = Path(root) / "FlyingThings3D"

    directions = ("into_future", "into_past")
    for pass_name, camera, direction in itertools.product(passes, cameras, directions):
        image_dirs = sorted(glob(str(root / pass_name / split / "*/*")))
        image_dirs = sorted(Path(image_dir) / camera for image_dir in image_dirs)

        flow_dirs = sorted(glob(str(root / "optical_flow" / split / "*/*")))
        flow_dirs = sorted(Path(flow_dir) / direction / camera for flow_dir in flow_dirs)

        if not image_dirs or not flow_dirs:
            raise FileNotFoundError(
                "Could not find the FlyingThings3D flow images. "
                "Please make sure the directory structure is correct."
            )

        for image_dir, flow_dir in zip(image_dirs, flow_dirs):
            images = sorted(glob(str(image_dir / "*.png")))
            flows = sorted(glob(str(flow_dir / "*.pfm")))
            for i in range(len(flows) - 1):
                if direction == "into_future":
                    self._image_list += [[images[i], images[i + 1]]]
                    self._flow_list += [flows[i]]
                elif direction == "into_past":
                    self._image_list += [[images[i + 1], images[i]]]
                    self._flow_list += [flows[i + 1]]
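# Usage sketch (not from the source): instantiating one of the flow datasets
# above. The class name Sintel is assumed from torchvision.datasets; the root
# path is a placeholder, with the archive extracted under <root>/Sintel as
# the constructor expects.
dataset = Sintel(root="/data", split="train", pass_name="clean")
img1, img2, flow = dataset[0]  # two PIL images and a (2, H, W) flow array
print(len(dataset))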