Dataset Card for "bad_code_to_good_code_dataset"

Columns: input (string, lengths 11 to 7.65k characters) and target (string, lengths 22 to 8.26k characters).

In the dataset-viewer preview below, every row shows the same input value; only the target varies from row to row.
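A minimal sketch of loading and inspecting the rows with the Hugging Face datasets library; the repository owner namespace and the split name are assumptions, since the card does not state them:

from datasets import load_dataset

# "your-namespace" is a placeholder owner; the split name "train" is assumed.
ds = load_dataset("your-namespace/bad_code_to_good_code_dataset", split="train")

row = ds[0]
print(len(row["input"]), len(row["target"]))  # per-cell string lengths, as summarized above
print(row["input"])   # a raw code snippet
print(row["target"])  # the paired rewritten snippet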
Sample input (identical in every previewed row, reproduced verbatim as a one-liner):

def dist(a, b): return sum((i-j)**2 for i, j in zip(a, b))

Sample target values, one per preview row:
def _maybe_name(obj):
  """Returns object name if it has one, or a message otherwise.

  This is useful for names that appear in error messages.

  Args:
    obj: Object to get the name of.

  Returns:
    name, "None", or a "no name" message.
  """
  if obj is None:
    return "None"
  elif hasattr(obj, "name"):
    return obj.name
  else:
    return "<no name for %s>" % type(obj)
def testResidualWrapper(self):
  wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
  x = ops.convert_to_tensor_v2_with_dispatch(
      np.array([[1., 1., 1.]]), dtype="float32")
  m = ops.convert_to_tensor_v2_with_dispatch(
      np.array([[0.1, 0.1, 0.1]]), dtype="float32")
  base_cell = rnn_cell_impl.GRUCell(
      3, kernel_initializer=init_ops.constant_initializer(0.5),
      bias_initializer=init_ops.constant_initializer(0.5))
  g, m_new = base_cell(x, m)
  wrapper_object = wrapper_type(base_cell)
  children = wrapper_object._trackable_children()
  wrapper_object.get_config()  # Should not throw an error
  self.assertIn("cell", children)
  self.assertIs(children["cell"], base_cell)

  g_res, m_new_res = wrapper_object(x, m)
  self.evaluate([variables_lib.global_variables_initializer()])
  res = self.evaluate([g, g_res, m_new, m_new_res])
  # Residual connections
  self.assertAllClose(res[1], res[0] + [1., 1., 1.])
  # States are left untouched
  self.assertAllClose(res[2], res[3])
def _restore_checkpoint_and_maybe_run_saved_model_initializers(
    sess, saver, path):
  """Restores checkpoint values and SavedModel initializers if found."""
  # NOTE: All references to SavedModel refer to SavedModels loaded from the
  # load_v2 API (which does not require the `sess` argument).

  # If the graph contains resources loaded from a SavedModel, they are not
  # restored when calling `saver.restore`. Thus, the SavedModel initializer
  # must be called with `saver.restore` to properly initialize the model.

  # The SavedModel init is stored in the "saved_model_initializers"
  # collection. This collection is part of the MetaGraph's default_init_op,
  # so it is already called by MonitoredSession as long as the saver doesn't
  # restore any checkpoints from the working dir.
  saved_model_init_ops = ops.get_collection("saved_model_initializers")
  if saved_model_init_ops:
    sess.run(saved_model_init_ops)

  # The saver must be called *after* the SavedModel init, because the
  # SavedModel init will restore the variables from the SavedModel variables
  # directory. Initializing/restoring twice is not ideal but there's no other
  # way to do it.
  saver.restore(sess, path)
def residual_with_slice_fn(inp, out):
  inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
  return inp_sliced + out
def __init__(self,
             local_init_op=None,
             ready_op=None,
             ready_for_local_init_op=None,
             graph=None,
             recovery_wait_secs=30,
             local_init_run_options=None,
             local_init_feed_dict=None):
  """Creates a SessionManager.

  The `local_init_op` is an `Operation` that is run always after a new
  session was created. If `None`, this step is skipped.

  The `ready_op` is an `Operation` used to check if the model is ready. The
  model is considered ready if that operation returns an empty 1D string
  tensor. If the operation returns a non empty 1D string tensor, the elements
  are concatenated and used to indicate to the user why the model is not
  ready.

  The `ready_for_local_init_op` is an `Operation` used to check if the model
  is ready to run local_init_op. The model is considered ready if that
  operation returns an empty 1D string tensor. If the operation returns a non
  empty 1D string tensor, the elements are concatenated and used to indicate
  to the user why the model is not ready.

  If `ready_op` is `None`, the model is not checked for readiness.

  `recovery_wait_secs` is the number of seconds between checks that the model
  is ready. It is used by processes to wait for a model to be initialized or
  restored. Defaults to 30 seconds.

  Args:
    local_init_op: An `Operation` run immediately after session creation.
      Usually used to initialize tables and local variables.
    ready_op: An `Operation` to check if the model is initialized.
    ready_for_local_init_op: An `Operation` to check if the model is ready
      to run local_init_op.
    graph: The `Graph` that the model will use.
    recovery_wait_secs: Seconds between checks for the model to be ready.
    local_init_run_options: RunOptions to be passed to session.run when
      executing the local_init_op.
    local_init_feed_dict: Optional session feed dictionary to use when
      running the local_init_op.

  Raises:
    ValueError: If ready_for_local_init_op is not None but local_init_op is
      None.
  """
  # Sets default values of arguments.
  if graph is None:
    graph = ops.get_default_graph()
  self._local_init_op = local_init_op
  self._ready_op = ready_op
  self._ready_for_local_init_op = ready_for_local_init_op
  self._graph = graph
  self._recovery_wait_secs = recovery_wait_secs
  self._target = None
  self._local_init_run_options = local_init_run_options
  self._local_init_feed_dict = local_init_feed_dict
  if ready_for_local_init_op is not None and local_init_op is None:
    raise ValueError("If you pass a ready_for_local_init_op "
                     "you must also pass a local_init_op "
                     ", ready_for_local_init_op [%s]" %
                     ready_for_local_init_op)
def testDeviceWrapper(self):
  wrapper_type = rnn_cell_wrapper_v2.DeviceWrapper
  x = array_ops.zeros([1, 3])
  m = array_ops.zeros([1, 3])
  cell = rnn_cell_impl.GRUCell(3)
  wrapped_cell = wrapper_type(cell, "/cpu:0")
  children = wrapped_cell._trackable_children()
  wrapped_cell.get_config()  # Should not throw an error
  self.assertIn("cell", children)
  self.assertIs(children["cell"], cell)

  outputs, _ = wrapped_cell(x, m)
  self.assertIn("cpu:0", outputs.device.lower())
def _restore_checkpoint(self,
                        master,
                        saver=None,
                        checkpoint_dir=None,
                        checkpoint_filename_with_path=None,
                        wait_for_checkpoint=False,
                        max_wait_secs=7200,
                        config=None):
  """Creates a `Session`, and tries to restore a checkpoint.

  Args:
    master: `String` representation of the TensorFlow master to use.
    saver: A `Saver` object used to restore a model.
    checkpoint_dir: Path to the checkpoint files. The latest checkpoint in
      the dir will be used to restore.
    checkpoint_filename_with_path: Full file name path to the checkpoint
      file.
    wait_for_checkpoint: Whether to wait for checkpoint to become available.
    max_wait_secs: Maximum time to wait for checkpoints to become available.
    config: Optional `ConfigProto` proto used to configure the session.

  Returns:
    A pair (sess, is_restored) where 'is_restored' is `True` if the session
    could be restored, `False` otherwise.

  Raises:
    ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
      set.
  """
  self._target = master

  # This is required so that we initialize the TPU device before restoring
  # from checkpoint since we'll be placing variables on the device and
  # TPUInitialize wipes out the memory of the device.
  strategy = distribution_strategy_context.get_strategy()
  if strategy and hasattr(strategy.extended,
                          "_experimental_initialize_system"):
    strategy.extended._experimental_initialize_system()  # pylint: disable=protected-access

  sess = session.Session(self._target, graph=self._graph, config=config)
  if checkpoint_dir and checkpoint_filename_with_path:
    raise ValueError("Can not provide both checkpoint_dir and "
                     "checkpoint_filename_with_path.")
  # If either saver or checkpoint_* is not specified, cannot restore. Just
  # return.
  if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
    return sess, False

  if checkpoint_filename_with_path:
    _restore_checkpoint_and_maybe_run_saved_model_initializers(
        sess, saver, checkpoint_filename_with_path)
    return sess, True

  # Waits up until max_wait_secs for checkpoint to become available.
  wait_time = 0
  ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
  while not ckpt or not ckpt.model_checkpoint_path:
    if wait_for_checkpoint and wait_time < max_wait_secs:
      logging.info("Waiting for checkpoint to be available.")
      time.sleep(self._recovery_wait_secs)
      wait_time += self._recovery_wait_secs
      ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
    else:
      return sess, False

  # Loads the checkpoint.
  _restore_checkpoint_and_maybe_run_saved_model_initializers(
      sess, saver, ckpt.model_checkpoint_path)
  saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
  return sess, True
def testWrapperKerasStyle(self, wrapper, wrapper_v2):
  """Tests if wrapper cell is instantiated in keras style scope."""
  wrapped_cell_v2 = wrapper_v2(rnn_cell_impl.BasicRNNCell(1))
  self.assertIsNone(getattr(wrapped_cell_v2, "_keras_style", None))

  wrapped_cell = wrapper(rnn_cell_impl.BasicRNNCell(1))
  self.assertFalse(wrapped_cell._keras_style)
def prepare_session(self,
                    master,
                    init_op=None,
                    saver=None,
                    checkpoint_dir=None,
                    checkpoint_filename_with_path=None,
                    wait_for_checkpoint=False,
                    max_wait_secs=7200,
                    config=None,
                    init_feed_dict=None,
                    init_fn=None):
  """Creates a `Session`. Makes sure the model is ready to be used.

  Creates a `Session` on 'master'. If a `saver` object is passed in, and
  `checkpoint_dir` points to a directory containing valid checkpoint files,
  then it will try to recover the model from checkpoint. If no checkpoint
  files are available, and `wait_for_checkpoint` is `True`, then the process
  would check every `recovery_wait_secs`, up to `max_wait_secs`, for recovery
  to succeed.

  If the model cannot be recovered successfully then it is initialized by
  running the `init_op` and calling `init_fn` if they are provided. The
  `local_init_op` is also run after init_op and init_fn, regardless of
  whether the model was recovered successfully, but only if
  `ready_for_local_init_op` passes.

  If the model is recovered from a checkpoint it is assumed that all global
  variables have been initialized, in particular neither `init_op` nor
  `init_fn` will be executed.

  It is an error if the model cannot be recovered and no `init_op` or
  `init_fn` or `local_init_op` are passed.

  Args:
    master: `String` representation of the TensorFlow master to use.
    init_op: Optional `Operation` used to initialize the model.
    saver: A `Saver` object used to restore a model.
    checkpoint_dir: Path to the checkpoint files. The latest checkpoint in
      the dir will be used to restore.
    checkpoint_filename_with_path: Full file name path to the checkpoint
      file.
    wait_for_checkpoint: Whether to wait for checkpoint to become available.
    max_wait_secs: Maximum time to wait for checkpoints to become available.
    config: Optional `ConfigProto` proto used to configure the session.
    init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
      values. This feed dictionary is passed to the session `run()` call when
      running the init op.
    init_fn: Optional callable used to initialize the model. Called after the
      optional `init_op` is called. The callable must accept one argument,
      the session being initialized.

  Returns:
    A `Session` object that can be used to drive the model.

  Raises:
    RuntimeError: If the model cannot be initialized or recovered.
    ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
      set.
  """
  sess, is_loaded_from_checkpoint = self._restore_checkpoint(
      master,
      saver,
      checkpoint_dir=checkpoint_dir,
      checkpoint_filename_with_path=checkpoint_filename_with_path,
      wait_for_checkpoint=wait_for_checkpoint,
      max_wait_secs=max_wait_secs,
      config=config)
  if not is_loaded_from_checkpoint:
    if init_op is None and not init_fn and self._local_init_op is None:
      raise RuntimeError("Model is not initialized and no init_op or "
                         "init_fn or local_init_op was given")
    if init_op is not None:
      sess.run(init_op, feed_dict=init_feed_dict)
    if init_fn:
      init_fn(sess)

  local_init_success, msg = self._try_run_local_init_op(sess)
  if not local_init_success:
    raise RuntimeError(
        "Init operations did not make model ready for local_init. "
        "Init op: %s, init fn: %s, error: %s" %
        (_maybe_name(init_op), init_fn, msg))

  is_ready, msg = self._model_ready(sess)
  if not is_ready:
    raise RuntimeError(
        "Init operations did not make model ready. "
        "Init op: %s, init fn: %s, local_init_op: %s, error: %s" %
        (_maybe_name(init_op), init_fn, self._local_init_op, msg))
  return sess
def testWrapperWeights(self, wrapper):
  """Tests that wrapper weights contain wrapped cells weights."""
  base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell")
  rnn_cell = wrapper(base_cell)
  rnn_layer = layers.RNN(rnn_cell)
  inputs = ops.convert_to_tensor_v2_with_dispatch([[[1]]],
                                                  dtype=dtypes.float32)
  rnn_layer(inputs)

  wrapper_name = generic_utils.to_snake_case(wrapper.__name__)
  expected_weights = ["rnn/" + wrapper_name + "/" + var
                      for var in ("kernel:0", "recurrent_kernel:0", "bias:0")]
  self.assertLen(rnn_cell.weights, 3)
  self.assertCountEqual([v.name for v in rnn_cell.weights],
                        expected_weights)
  self.assertCountEqual([v.name for v in rnn_cell.trainable_variables],
                        expected_weights)
  self.assertCountEqual([v.name for v in rnn_cell.non_trainable_variables],
                        [])
  self.assertCountEqual([v.name for v in rnn_cell.cell.weights],
                        expected_weights)
def recover_session(self,
                    master,
                    saver=None,
                    checkpoint_dir=None,
                    checkpoint_filename_with_path=None,
                    wait_for_checkpoint=False,
                    max_wait_secs=7200,
                    config=None):
  """Creates a `Session`, recovering if possible.

  Creates a new session on 'master'. If the session is not initialized and
  can be recovered from a checkpoint, recover it.

  Args:
    master: `String` representation of the TensorFlow master to use.
    saver: A `Saver` object used to restore a model.
    checkpoint_dir: Path to the checkpoint files. The latest checkpoint in
      the dir will be used to restore.
    checkpoint_filename_with_path: Full file name path to the checkpoint
      file.
    wait_for_checkpoint: Whether to wait for checkpoint to become available.
    max_wait_secs: Maximum time to wait for checkpoints to become available.
    config: Optional `ConfigProto` proto used to configure the session.

  Returns:
    A pair (sess, initialized) where 'initialized' is `True` if the session
    could be recovered and initialized, `False` otherwise.

  Raises:
    ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
      set.
  """
  sess, is_loaded_from_checkpoint = self._restore_checkpoint(
      master,
      saver,
      checkpoint_dir=checkpoint_dir,
      checkpoint_filename_with_path=checkpoint_filename_with_path,
      wait_for_checkpoint=wait_for_checkpoint,
      max_wait_secs=max_wait_secs,
      config=config)

  # Always try to run local_init_op
  local_init_success, msg = self._try_run_local_init_op(sess)

  if not is_loaded_from_checkpoint:
    # Do not need to run checks for readiness
    return sess, False

  restoring_file = checkpoint_dir or checkpoint_filename_with_path
  if not local_init_success:
    logging.info(
        "Restoring model from %s did not make model ready for local init:"
        " %s", restoring_file, msg)
    return sess, False

  is_ready, msg = self._model_ready(sess)
  if not is_ready:
    logging.info("Restoring model from %s did not make model ready: %s",
                 restoring_file, msg)
    return sess, False

  logging.info("Restored model from %s", restoring_file)
  return sess, is_loaded_from_checkpoint
def testWrapperV2Caller(self, wrapper):
  """Tests that wrapper V2 is using the LayerRNNCell's caller."""
  with legacy_base_layer.keras_style_scope():
    base_cell = rnn_cell_impl.MultiRNNCell(
        [rnn_cell_impl.BasicRNNCell(1) for _ in range(2)])
    rnn_cell = wrapper(base_cell)
    inputs = ops.convert_to_tensor_v2_with_dispatch([[1]],
                                                    dtype=dtypes.float32)
    state = ops.convert_to_tensor_v2_with_dispatch([[1]],
                                                   dtype=dtypes.float32)
    _ = rnn_cell(inputs, [state, state])
    weights = base_cell._cells[0].weights
  self.assertLen(weights, expected_len=2)
  self.assertTrue(all("_wrapper" in v.name for v in weights))
def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
  """Creates a new `Session` and waits for model to be ready.

  Creates a new `Session` on 'master'. Waits for the model to be initialized
  or recovered from a checkpoint. It's expected that another thread or
  process will make the model ready, and that this is intended to be used by
  threads/processes that participate in a distributed training configuration
  where a different thread/process is responsible for initializing or
  recovering the model being trained.

  NB: The amount of time this method waits for the session is bounded by
  max_wait_secs. By default, this function will wait indefinitely.

  Args:
    master: `String` representation of the TensorFlow master to use.
    config: Optional ConfigProto proto used to configure the session.
    max_wait_secs: Maximum time to wait for the session to become available.

  Returns:
    A `Session`. May be None if the operation exceeds the timeout specified
    by config.operation_timeout_in_ms.

  Raises:
    tf.DeadlineExceededError: if the session is not available after
      max_wait_secs.
  """
  self._target = master

  if max_wait_secs is None:
    max_wait_secs = float("Inf")
  timer = _CountDownTimer(max_wait_secs)

  while True:
    sess = session.Session(self._target, graph=self._graph, config=config)
    not_ready_msg = None
    not_ready_local_msg = None
    local_init_success, not_ready_local_msg = self._try_run_local_init_op(
        sess)
    if local_init_success:
      # Successful if local_init_op is None, or ready_for_local_init_op
      # passes.
      is_ready, not_ready_msg = self._model_ready(sess)
      if is_ready:
        return sess
    self._safe_close(sess)

    # Do we have enough time left to try again?
    remaining_ms_after_wait = (
        timer.secs_remaining() - self._recovery_wait_secs)
    if remaining_ms_after_wait < 0:
      raise errors.DeadlineExceededError(
          None, None,
          "Session was not ready after waiting %d secs." % (max_wait_secs,))

    logging.info("Waiting for model to be ready. "
                 "Ready_for_local_init_op: %s, ready: %s",
                 not_ready_local_msg, not_ready_msg)
    time.sleep(self._recovery_wait_secs)
def testWrapperV2Build(self, wrapper):
  cell = rnn_cell_impl.LSTMCell(10)
  wrapper = wrapper(cell)
  wrapper.build((1,))
  self.assertTrue(cell.built)
def _safe_close(self, sess):
  """Closes a session without raising an exception.

  Just like sess.close() but ignores exceptions.

  Args:
    sess: A `Session`.
  """
  # pylint: disable=broad-except
  try:
    sess.close()
  except Exception:
    # Intentionally not logging to avoid user complaints that
    # they get cryptic errors. We really do not care that Close
    # fails.
    pass
  # pylint: enable=broad-except
def testDeviceWrapperSerialization(self):
  wrapper_cls = rnn_cell_wrapper_v2.DeviceWrapper
  cell = layers.LSTMCell(10)
  wrapper = wrapper_cls(cell, "/cpu:0")
  config = wrapper.get_config()

  reconstructed_wrapper = wrapper_cls.from_config(config)
  self.assertDictEqual(config, reconstructed_wrapper.get_config())
  self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
def _model_ready(self, sess):
  """Checks if the model is ready or not.

  Args:
    sess: A `Session`.

  Returns:
    A tuple (is_ready, msg), where is_ready is True if ready and False
    otherwise, and msg is `None` if the model is ready, a `String` with the
    reason why it is not ready otherwise.
  """
  return _ready(self._ready_op, sess, "Model not ready")
def residual_fn(inputs, outputs):
  return inputs * 3 + outputs
def _model_ready_for_local_init(self, sess):
  """Checks if the model is ready to run local_init_op.

  Args:
    sess: A `Session`.

  Returns:
    A tuple (is_ready, msg), where is_ready is True if ready to run
    local_init_op and False otherwise, and msg is `None` if the model is
    ready to run local_init_op, a `String` with the reason why it is not
    ready otherwise.
  """
  return _ready(self._ready_for_local_init_op, sess,
                "Model not ready for local init")
def dropout_state_filter_visitor(unused_state):
  return False
def _try_run_local_init_op(self, sess):
  """Tries to run _local_init_op, if not None, and is ready for local init.

  Args:
    sess: A `Session`.

  Returns:
    A tuple (is_successful, msg), where is_successful is True if
    _local_init_op is None, or we ran _local_init_op, and False otherwise;
    and msg is a `String` with the reason why the model was not ready to run
    local init.
  """
  if self._local_init_op is not None:
    is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
    if is_ready_for_local_init:
      logging.info("Running local_init_op.")
      sess.run(self._local_init_op,
               feed_dict=self._local_init_feed_dict,
               options=self._local_init_run_options)
      logging.info("Done running local_init_op.")
      return True, None
    else:
      return False, msg
  return True, None
def testDropoutWrapperWithKerasLSTMCell(self):
  wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper
  cell = layers.LSTMCell(10)

  with self.assertRaisesRegex(ValueError, "does not work with "):
    wrapper_cls(cell)

  cell = layers.LSTMCellV2(10)
  with self.assertRaisesRegex(ValueError, "does not work with "):
    wrapper_cls(cell)
def _ready(op, sess, msg):
  """Checks if the model is ready or not, as determined by op.

  Args:
    op: An op, either _ready_op or _ready_for_local_init_op, which defines
      the readiness of the model.
    sess: A `Session`.
    msg: A message to log to warning if not ready

  Returns:
    A tuple (is_ready, msg), where is_ready is True if ready and False
    otherwise, and msg is `None` if the model is ready, a `String` with the
    reason why it is not ready otherwise.
  """
  if op is None:
    return True, None
  else:
    try:
      ready_value = sess.run(op)
      # The model is considered ready if ready_op returns an empty 1-D
      # tensor. Also compare to `None` and dtype being int32 for backward
      # compatibility.
      if (ready_value is None or ready_value.dtype == np.int32 or
          ready_value.size == 0):
        return True, None
      else:
        # TODO(sherrym): If a custom ready_op returns other types of tensor,
        # or strings other than variable names, this message could be
        # confusing.
        non_initialized_varnames = ", ".join(
            [i.decode("utf-8") for i in ready_value])
        return False, "Variables not initialized: " + non_initialized_varnames
    except errors.FailedPreconditionError as e:
      if "uninitialized" not in str(e):
        logging.warning("%s : error [%s]", msg, str(e))
        raise e
      return False, str(e)
def __init__(self, duration_secs):
  self._start_time_secs = time.time()
  self._duration_secs = duration_secs
def __init__(self, name: str, get_converter: t.Optional[t.Callable] = None) -> None:
    self.__name__ = name
    self.get_converter = get_converter
def test_refraction_angle():
    n1, n2 = symbols('n1, n2')
    m1 = Medium('m1')
    m2 = Medium('m2')
    r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
    i = Matrix([1, 1, 1])
    n = Matrix([0, 0, 1])
    normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
    P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
    assert refraction_angle(r1, 1, 1, n) == Matrix([
        [ 1],
        [ 1],
        [-1]])
    assert refraction_angle([1, 1, 1], 1, 1, n) == Matrix([
        [ 1],
        [ 1],
        [-1]])
    assert refraction_angle((1, 1, 1), 1, 1, n) == Matrix([
        [ 1],
        [ 1],
        [-1]])
    assert refraction_angle(i, 1, 1, [0, 0, 1]) == Matrix([
        [ 1],
        [ 1],
        [-1]])
    assert refraction_angle(i, 1, 1, (0, 0, 1)) == Matrix([
        [ 1],
        [ 1],
        [-1]])
    assert refraction_angle(i, 1, 1, normal_ray) == Matrix([
        [ 1],
        [ 1],
        [-1]])
    assert refraction_angle(i, 1, 1, plane=P) == Matrix([
        [ 1],
        [ 1],
        [-1]])
    assert refraction_angle(r1, 1, 1, plane=P) == \
        Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
    assert refraction_angle(r1, m1, 1.33, plane=P) == \
        Ray3D(Point3D(0, 0, 0),
              Point3D(Rational(100, 133), Rational(100, 133),
                      -789378201649271*sqrt(3)/1000000000000000))
    assert refraction_angle(r1, 1, m2, plane=P) == \
        Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
    assert refraction_angle(r1, n1, n2, plane=P) == \
        Ray3D(Point3D(0, 0, 0),
              Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)))
    assert refraction_angle(r1, 1.33, 1, plane=P) == 0  # TIR
    assert refraction_angle(r1, 1, 1, normal_ray) == \
        Ray3D(Point3D(0, 0, 0), direction_ratio=[1, 1, -1])
    assert ae(refraction_angle(0.5, 1, 2), 0.24207, 5)
    assert ae(refraction_angle(0.5, 2, 1), 1.28293, 5)
    raises(ValueError, lambda: refraction_angle(r1, m1, m2, normal_ray, P))
    raises(TypeError, lambda: refraction_angle(m1, m1, m2))  # can add other values for arg[0]
    raises(TypeError, lambda: refraction_angle(r1, m1, m2, None, i))
    raises(TypeError, lambda: refraction_angle(r1, m1, m2, m2))
def __get__(self, obj: t.Any, owner: t.Any = None) -> t.Any:
    if obj is None:
        return self
    rv = obj.config[self.__name__]
    if self.get_converter is not None:
        rv = self.get_converter(rv)
    return rv
def test_fresnel_coefficients():
    assert all(ae(i, j, 5) for i, j in zip(
        fresnel_coefficients(0.5, 1, 1.33),
        [0.11163, -0.17138, 0.83581, 0.82862]))
    assert all(ae(i, j, 5) for i, j in zip(
        fresnel_coefficients(0.5, 1.33, 1),
        [-0.07726, 0.20482, 1.22724, 1.20482]))
    m1 = Medium('m1')
    m2 = Medium('m2', n=2)
    assert all(ae(i, j, 5) for i, j in zip(
        fresnel_coefficients(0.3, m1, m2),
        [0.31784, -0.34865, 0.65892, 0.65135]))
    ans = [[-0.23563, -0.97184], [0.81648, -0.57738]]
    got = fresnel_coefficients(0.6, m2, m1)
    for i, j in zip(got, ans):
        for a, b in zip(i.as_real_imag(), j):
            assert ae(a, b, 5)
def __set__(self, obj: t.Any, value: t.Any) -> None:
    obj.config[self.__name__] = value
def test_deviation():
    n1, n2 = symbols('n1, n2')
    r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
    n = Matrix([0, 0, 1])
    i = Matrix([-1, -1, -1])
    normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
    P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
    assert deviation(r1, 1, 1, normal=n) == 0
    assert deviation(r1, 1, 1, plane=P) == 0
    assert deviation(r1, 1, 1.1, plane=P).evalf(3) + 0.119 < 1e-3
    assert deviation(i, 1, 1.1, normal=normal_ray).evalf(3) + 0.119 < 1e-3
    assert deviation(r1, 1.33, 1, plane=P) is None  # TIR
    assert deviation(r1, 1, 1, normal=[0, 0, 1]) == 0
    assert deviation([-1, -1, -1], 1, 1, normal=[0, 0, 1]) == 0
    assert ae(deviation(0.5, 1, 2), -0.25793, 5)
    assert ae(deviation(0.5, 2, 1), 0.78293, 5)
def __init__(self, root_path: str, defaults: t.Optional[dict] = None) -> None:
    dict.__init__(self, defaults or {})
    self.root_path = root_path
def test_brewster_angle():
    m1 = Medium('m1', n=1)
    m2 = Medium('m2', n=1.33)
    assert ae(brewster_angle(m1, m2), 0.93, 2)
    m1 = Medium('m1', permittivity=e0, n=1)
    m2 = Medium('m2', permittivity=e0, n=1.33)
    assert ae(brewster_angle(m1, m2), 0.93, 2)
    assert ae(brewster_angle(1, 1.33), 0.93, 2)
def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
    """Loads a configuration from an environment variable pointing to
    a configuration file.  This is basically just a shortcut with nicer
    error messages for this line of code::

        app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])

    :param variable_name: name of the environment variable
    :param silent: set to ``True`` if you want silent failure for missing
                   files.
    :return: bool. ``True`` if able to load config, ``False`` otherwise.
    """
    rv = os.environ.get(variable_name)
    if not rv:
        if silent:
            return False
        raise RuntimeError(
            f"The environment variable {variable_name!r} is not set"
            " and as such configuration could not be loaded. Set"
            " this variable and make it point to a configuration"
            " file"
        )
    return self.from_pyfile(rv, silent=silent)
def test_critical_angle():
    m1 = Medium('m1', n=1)
    m2 = Medium('m2', n=1.33)
    assert ae(critical_angle(m2, m1), 0.85, 2)
def from_pyfile(self, filename: str, silent: bool = False) -> bool:
    """Updates the values in the config from a Python file.  This function
    behaves as if the file was imported as module with the
    :meth:`from_object` function.

    :param filename: the filename of the config.  This can either be an
                     absolute filename or a filename relative to the
                     root path.
    :param silent: set to ``True`` if you want silent failure for missing
                   files.

    .. versionadded:: 0.7
       `silent` parameter.
    """
    filename = os.path.join(self.root_path, filename)
    d = types.ModuleType("config")
    d.__file__ = filename
    try:
        with open(filename, mode="rb") as config_file:
            exec(compile(config_file.read(), filename, "exec"), d.__dict__)
    except OSError as e:
        if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
            return False
        e.strerror = f"Unable to load configuration file ({e.strerror})"
        raise
    self.from_object(d)
    return True
def test_lens_makers_formula():
    n1, n2 = symbols('n1, n2')
    m1 = Medium('m1', permittivity=e0, n=1)
    m2 = Medium('m2', permittivity=e0, n=1.33)
    assert lens_makers_formula(n1, n2, 10, -10) == 5*n2/(n1 - n2)
    assert ae(lens_makers_formula(m1, m2, 10, -10), -20.15, 2)
    assert ae(lens_makers_formula(1.33, 1, 10, -10), 15.15, 2)
def from_object(self, obj: t.Union[object, str]) -> None:
    """Updates the values from the given object.  An object can be of one
    of the following two types:

    -   a string: in this case the object with that name will be imported
    -   an actual object reference: that object is used directly

    Objects are usually either modules or classes. :meth:`from_object`
    loads only the uppercase attributes of the module/class. A ``dict``
    object will not work with :meth:`from_object` because the keys of a
    ``dict`` are not attributes of the ``dict`` class.

    Example of module-based configuration::

        app.config.from_object('yourapplication.default_config')
        from yourapplication import default_config
        app.config.from_object(default_config)

    Nothing is done to the object before loading. If the object is a
    class and has ``@property`` attributes, it needs to be
    instantiated before being passed to this method.

    You should not use this function to load the actual configuration but
    rather configuration defaults.  The actual config should be loaded
    with :meth:`from_pyfile` and ideally from a location not within the
    package because the package might be installed system wide.

    See :ref:`config-dev-prod` for an example of class-based configuration
    using :meth:`from_object`.

    :param obj: an import name or object
    """
    if isinstance(obj, str):
        obj = import_string(obj)
    for key in dir(obj):
        if key.isupper():
            self[key] = getattr(obj, key)
def test_mirror_formula():
    u, v, f = symbols('u, v, f')
    assert mirror_formula(focal_length=f, u=u) == f*u/(-f + u)
    assert mirror_formula(focal_length=f, v=v) == f*v/(-f + v)
    assert mirror_formula(u=u, v=v) == u*v/(u + v)
    assert mirror_formula(u=oo, v=v) == v
    assert mirror_formula(u=oo, v=oo) is oo
    assert mirror_formula(focal_length=oo, u=u) == -u
    assert mirror_formula(u=u, v=oo) == u
    assert mirror_formula(focal_length=oo, v=oo) is oo
    assert mirror_formula(focal_length=f, v=oo) == f
    assert mirror_formula(focal_length=oo, v=v) == -v
    assert mirror_formula(focal_length=oo, u=oo) is oo
    assert mirror_formula(focal_length=f, u=oo) == f
    assert mirror_formula(focal_length=oo, u=u) == -u
    raises(ValueError, lambda: mirror_formula(focal_length=f, u=u, v=v))
def from_file(
    self,
    filename: str,
    load: t.Callable[[t.IO[t.Any]], t.Mapping],
    silent: bool = False,
) -> bool:
    """Update the values in the config from a file that is loaded
    using the ``load`` parameter. The loaded data is passed to the
    :meth:`from_mapping` method.

    .. code-block:: python

        import toml
        app.config.from_file("config.toml", load=toml.load)

    :param filename: The path to the data file. This can be an
        absolute path or relative to the config root path.
    :param load: A callable that takes a file handle and returns a
        mapping of loaded data from the file.
    :type load: ``Callable[[Reader], Mapping]`` where ``Reader``
        implements a ``read`` method.
    :param silent: Ignore the file if it doesn't exist.

    .. versionadded:: 2.0
    """
    filename = os.path.join(self.root_path, filename)

    try:
        with open(filename) as f:
            obj = load(f)
    except OSError as e:
        if silent and e.errno in (errno.ENOENT, errno.EISDIR):
            return False

        e.strerror = f"Unable to load configuration file ({e.strerror})"
        raise

    return self.from_mapping(obj)
def test_lens_formula():
    u, v, f = symbols('u, v, f')
    assert lens_formula(focal_length=f, u=u) == f*u/(f + u)
    assert lens_formula(focal_length=f, v=v) == f*v/(f - v)
    assert lens_formula(u=u, v=v) == u*v/(u - v)
    assert lens_formula(u=oo, v=v) == v
    assert lens_formula(u=oo, v=oo) is oo
    assert lens_formula(focal_length=oo, u=u) == u
    assert lens_formula(u=u, v=oo) == -u
    assert lens_formula(focal_length=oo, v=oo) is -oo
    assert lens_formula(focal_length=oo, v=v) == v
    assert lens_formula(focal_length=f, v=oo) == -f
    assert lens_formula(focal_length=oo, u=oo) is oo
    assert lens_formula(focal_length=oo, u=u) == u
    assert lens_formula(focal_length=f, u=oo) == f
    raises(ValueError, lambda: lens_formula(focal_length=f, u=u, v=v))
def from_json(self, filename: str, silent: bool = False) -> bool:
    """Update the values in the config from a JSON file. The loaded
    data is passed to the :meth:`from_mapping` method.

    :param filename: The path to the JSON file. This can be an
        absolute path or relative to the config root path.
    :param silent: Ignore the file if it doesn't exist.

    .. deprecated:: 2.0.0
        Will be removed in Flask 2.1. Use :meth:`from_file` instead.
        This was removed early in 2.0.0, was added back in 2.0.1.

    .. versionadded:: 0.11
    """
    import warnings
    from . import json

    warnings.warn(
        "'from_json' is deprecated and will be removed in Flask"
        " 2.1. Use 'from_file(path, json.load)' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.from_file(filename, json.load, silent=silent)
def test_hyperfocal_distance():
    f, N, c = symbols('f, N, c')
    assert hyperfocal_distance(f=f, N=N, c=c) == f**2/(N*c)
    assert ae(hyperfocal_distance(f=0.5, N=8, c=0.0033), 9.47, 2)
def from_mapping(
    self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any
) -> bool:
    """Updates the config like :meth:`update` ignoring items with
    non-upper keys.

    .. versionadded:: 0.11
    """
    mappings: t.Dict[str, t.Any] = {}
    if mapping is not None:
        mappings.update(mapping)
    mappings.update(kwargs)
    for key, value in mappings.items():
        if key.isupper():
            self[key] = value
    return True
def get_namespace(
    self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
) -> t.Dict[str, t.Any]:
    """Returns a dictionary containing a subset of configuration options
    that match the specified namespace/prefix. Example usage::

        app.config['IMAGE_STORE_TYPE'] = 'fs'
        app.config['IMAGE_STORE_PATH'] = '/var/app/images'
        app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
        image_store_config = app.config.get_namespace('IMAGE_STORE_')

    The resulting dictionary `image_store_config` would look like::

        {
            'type': 'fs',
            'path': '/var/app/images',
            'base_url': 'http://img.website.com'
        }

    This is often useful when configuration options map directly to
    keyword arguments in functions or class constructors.

    :param namespace: a configuration namespace
    :param lowercase: a flag indicating if the keys of the resulting
                      dictionary should be lowercase
    :param trim_namespace: a flag indicating if the keys of the resulting
                           dictionary should not include the namespace

    .. versionadded:: 0.11
    """
    rv = {}
    for k, v in self.items():
        if not k.startswith(namespace):
            continue
        if trim_namespace:
            key = k[len(namespace):]
        else:
            key = k
        if lowercase:
            key = key.lower()
        rv[key] = v
    return rv
async def async_get_pickup_events() -> list[PickupEvent]:
    """Get the next pickup."""
    try:
        return await client.async_get_pickup_events(
            start_date=date.today(),
            end_date=date.today() + timedelta(weeks=4),
        )
    except RecollectError as err:
        raise UpdateFailed(
            f"Error while requesting data from ReCollect: {err}"
        ) from err
def test_can_write_simple_identifier(self):
    escaped = cypher_escape("foo")
    assert escaped == "foo"
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Handle an options update."""
    await hass.config_entries.async_reload(entry.entry_id)
def test_can_write_identifier_with_odd_chars(self):
    escaped = cypher_escape("foo bar")
    assert escaped == "`foo bar`"
def test_can_write_identifier_containing_back_ticks(self):
    escaped = cypher_escape("foo `bar`")
    assert escaped == "`foo ``bar```"
def test_cannot_write_empty_identifier(self):
    with self.assertRaises(ValueError):
        _ = cypher_escape("")
def test_cannot_write_none_identifier(self):
    with self.assertRaises(TypeError):
        _ = cypher_escape(None)
def test_should_encode_none(self):
    encoded = cypher_repr(None)
    assert encoded == u"null"
def test_should_encode_true(self):
    encoded = cypher_repr(True)
    assert encoded == u"true"
def test_should_encode_zero(self):
    encoded = cypher_repr(0)
    assert encoded == u"0"
def test_should_encode_positive_integer(self):
    encoded = cypher_repr(123)
    assert encoded == u"123"
def test_should_encode_negative_integer(self):
    encoded = cypher_repr(-123)
    assert encoded == u"-123"
def test_should_encode_zero(self):
    encoded = cypher_repr(0.0)
    assert encoded == u"0.0"
def test_should_encode_positive_float(self):
    encoded = cypher_repr(123.456)
    assert encoded == u"123.456"
def test_should_encode_negative_float(self):
    encoded = cypher_repr(-123.456)
    assert encoded == u"-123.456"
def test_should_encode_bytes_with_escaped_chars(self):
    encoded = cypher_repr(b"hello, 'world'", quote=u"'")
    assert encoded == u"'hello, \\'world\\''"
def test_should_encode_empty_string(self):
    encoded = cypher_repr(u"")
    assert encoded == u"''"
def test_should_encode_backspace(self):
    encoded = cypher_repr(u"\b")
    assert encoded == u"'\\b'"
def test_should_encode_horizontal_tab(self):
    encoded = cypher_repr(u"\t")
    assert encoded == u"'\\t'"
def test_should_encode_single_quote_when_single_quoted(self):
    encoded = cypher_repr(u"'", quote=u"'")
    assert encoded == u"'\\''"
def test_should_encode_list(self):
    encoded = cypher_repr([1, 2.0, u"three"])
    assert encoded == u"[1, 2.0, 'three']"
def test_should_encode_empty_list(self):
    encoded = cypher_repr([])
    assert encoded == u"[]"
def test_should_encode_map(self):
    encoded = cypher_repr(OrderedDict([("one", 1), ("two", 2.0),
                                       ("number three", u"three")]))
    assert encoded == u"{one: 1, two: 2.0, `number three`: 'three'}"
def test_should_encode_empty_map(self):
    encoded = cypher_repr({})
    assert encoded == u"{}"
def test_should_encode_empty_node(self):
    a = Node()
    encoded = cypher_repr(a, node_template="{labels} {properties}")
    assert encoded == u"({})"
def test_should_encode_node_with_property(self):
    a = Node(name="Alice")
    encoded = cypher_repr(a, node_template="{labels} {properties}")
    assert encoded == u"({name: 'Alice'})"
def test_should_encode_node_with_label(self):
    a = Node("Person")
    encoded = cypher_repr(a, node_template="{labels} {properties}")
    assert encoded == u"(:Person {})"
def test_should_encode_node_with_label_and_property(self):
    a = Node("Person", name="Alice")
    encoded = cypher_repr(a, node_template="{labels} {properties}")
    assert encoded == u"(:Person {name: 'Alice'})"
def test_can_encode_relationship(self):
    a = Node(name="Alice")
    b = Node(name="Bob")
    ab = KNOWS(a, b)
    encoded = cypher_repr(ab, related_node_template="{property.name}")
    self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_names(self):
    a = Node("Person", name="Alice")
    b = Node("Person", name="Bob")
    ab = KNOWS(a, b)
    encoded = cypher_repr(ab, related_node_template="{property.name}")
    self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_alternative_names(self):
    a = Node("Person", nom=u"Aimée")
    b = Node("Person", nom=u"Baptiste")
    ab = KNOWS_FR(a, b)
    encoded = cypher_repr(ab, related_node_template=u"{property.nom}")
    self.assertEqual(u"(Aimée)-[:CONNAÎT {}]->(Baptiste)", encoded)
def test_can_encode_relationship_with_properties(self):
    a = Node("Person", name="Alice")
    b = Node("Person", name="Bob")
    ab = KNOWS(a, b, since=1999)
    encoded = cypher_repr(ab, related_node_template="{property.name}")
    self.assertEqual("(Alice)-[:KNOWS {since: 1999}]->(Bob)", encoded)
def __getattr__(cls, name):
    return MagicMock()
def combinationSum(self, candidates, target):
    candidates.sort()
    self.result = []
    self.dfs(candidates, target, 0, [])
    return self.result
def dfs(self, candidates, target, start, reslist):
    length = len(candidates)
    if target == 0:
        return self.result.append(reslist)
def combinationSum(self, candidates, target):
    self.result = []
    self.dfs(candidates, 0, target, [])
    return self.result
def dfs(self, can, cursum, target, res):
    if cursum > target:
        return
    if cursum == target:
        self.result.append(res)
        return
    for i in xrange(len(can)):
        if not res or res[len(res) - 1] <= can[i]:
            self.dfs(can, cursum + can[i], target, res + [can[i]])
def gpio_init(pin, output):
    try:
        with open(f"/sys/class/gpio/gpio{pin}/direction", 'wb') as f:
            f.write(b"out" if output else b"in")
    except Exception as e:
        print(f"Failed to set gpio {pin} direction: {e}")
def batch_norm(inputs, training, data_format, name=''):
  """Performs a batch normalization using a standard set of parameters."""
  # We set fused=True for a significant performance boost. See
  # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
  return tf.compat.v1.layers.batch_normalization(
      inputs=inputs, axis=1 if data_format == 'channels_first' else 3,
      momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
      scale=True, training=training, fused=True, name=name)
def step_impl(context, user_names_str):
    if not hasattr(context, 'users'):
        context.users = {}
    user_names = [name.strip() for name in re.split('and|,', user_names_str)]
    for user_name in user_names:
        token = 'fake_token_' + user_name
        user_id = context.helpers.create_test_user(user_name, token)
        context.users[user_name] = {'token': token, 'id': user_id}
def fixed_padding(inputs, kernel_size, data_format):
  """Pads the input along the spatial dimensions independently of input size.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
      Should be a positive integer.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    A tensor with the same format as the input with the data either intact
    (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  pad_total = kernel_size - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg

  if data_format == 'channels_first':
    padded_inputs = tf.pad(
        tensor=inputs,
        paddings=[[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
  else:
    padded_inputs = tf.pad(
        tensor=inputs,
        paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
  return padded_inputs
def step_impl(context, user_name):
    context.token = context.users[user_name]['token']
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format,
                         name):
  """Strided 2-D convolution with explicit padding."""
  # The padding is consistent and is based only on `kernel_size`, not on the
  # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format)

  return tf.compat.v1.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size,
      strides=strides, padding=('SAME' if strides == 1 else 'VALID'),
      use_bias=False, reuse=tf.AUTO_REUSE,
      kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
      data_format=data_format, name=name)
def step_impl(context, user_name, count):
    context.helpers.load_postcards(user_name, count)
def _building_block_v2(inputs, filters, training, projection_shortcut,
                       strides, data_format, name):
  """A single block for ResNet v2, without a bottleneck.

  Batch normalization then ReLu then convolution as described by:
    Identity Mappings in Deep Residual Networks
    https://arxiv.org/pdf/1603.05027.pdf
    by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will
      ultimately downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').
    name: Block name.

  Returns:
    The output tensor of the block; shape should match inputs.
  """
  shortcut = inputs
  first_name = name + 'first'
  inputs = batch_norm(
      inputs, training, data_format, name=first_name + 'batch_norm')
  inputs = tf.nn.relu(inputs, name=first_name + 'relu')

  # The projection shortcut should come after the first batch norm and ReLU
  # since it performs a 1x1 convolution.
  if projection_shortcut is not None:
    shortcut = projection_shortcut(inputs, name=first_name + 'proj')

  second_name = name + 'second'
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format, name=second_name + 'input')

  inputs = batch_norm(
      inputs, training, data_format, name=second_name + 'batch_norm')
  inputs = tf.nn.relu(inputs, name=second_name + 'relu')

  third_name = name + 'third'
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=1,
      data_format=data_format, name=third_name + 'input')

  return inputs + shortcut
def step_impl(context, rel_url):
    context.request = LazyRequest(
        'GET', context.helpers.url(rel_url), context.token)
def projection_shortcut(inputs, name):
  return conv2d_fixed_padding(
      inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
      data_format=data_format, name=name)
def step_impl(context, rel_url):
    context.request = LazyRequest(
        'POST', context.helpers.url(rel_url), context.token)
def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
             kernel_size, conv_stride, first_pool_size, first_pool_stride,
             block_sizes, block_strides,
             resnet_version=DEFAULT_VERSION, data_format=None,
             dtype=DEFAULT_DTYPE):
  """Creates a model for classifying an image.

  Args:
    resnet_size: A single integer for the size of the ResNet model.
    bottleneck: Use regular blocks or bottleneck blocks.
    num_classes: The number of classes used as labels.
    num_filters: The number of filters to use for the first block layer of
      the model. This number is then doubled for each subsequent block layer.
    kernel_size: The kernel size to use for convolution.
    conv_stride: stride size for the initial convolutional layer
    first_pool_size: Pool size to be used for the first pooling layer. If
      none, the first pooling layer is skipped.
    first_pool_stride: stride size for the first pooling layer. Not used if
      first_pool_size is None.
    block_sizes: A list containing n values, where n is the number of sets of
      block layers desired. Each value should be the number of blocks in the
      i-th set.
    block_strides: List of integers representing the desired stride size for
      each of the sets of block layers. Should be same length as block_sizes.
    resnet_version: Integer representing which version of the ResNet network
      to use. See README for details. Valid values: [1, 2]
    data_format: Input format ('channels_last', 'channels_first', or None).
      If set to None, the format is dependent on whether a GPU is available.
    dtype: The TensorFlow dtype to use for calculations. If not specified
      tf.float32 is used.

  Raises:
    ValueError: if invalid version is selected.
  """
  self.resnet_size = resnet_size

  if not data_format:
    data_format = ('channels_first'
                   if tf.test.is_built_with_cuda() else 'channels_last')

  self.resnet_version = resnet_version
  if resnet_version not in (1, 2):
    raise ValueError(
        'Resnet version should be 1 or 2. See README for citations.')

  self.bottleneck = bottleneck
  self.block_fn = _building_block_v2

  if dtype not in ALLOWED_TYPES:
    raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES))

  self.data_format = data_format
  self.num_classes = num_classes
  self.num_filters = num_filters
  self.kernel_size = kernel_size
  self.conv_stride = conv_stride
  self.first_pool_size = first_pool_size
  self.first_pool_stride = first_pool_stride
  self.block_sizes = block_sizes
  self.block_strides = block_strides
  self.dtype = dtype
  self.pre_activation = resnet_version == 2
def step_impl(context, name, field):
    context.request.add_file(context.helpers.file_path(name), field)
def _custom_dtype_getter(self,  # pylint: disable=keyword-arg-before-vararg
                         getter, name, shape=None, dtype=DEFAULT_DTYPE,
                         *args, **kwargs):
  """Creates variables in fp32, then casts to fp16 if necessary.

  This function is a custom getter. A custom getter is a function with the
  same signature as tf.get_variable, except it has an additional getter
  parameter. Custom getters can be passed as the `custom_getter` parameter of
  tf.variable_scope. Then, tf.get_variable will call the custom getter,
  instead of directly getting a variable itself. This can be used to change
  the types of variables that are retrieved with tf.get_variable.
  The `getter` parameter is the underlying variable getter, that would have
  been called if no custom getter was used. Custom getters typically get a
  variable with `getter`, then modify it in some way.

  This custom getter will create an fp32 variable. If a low precision
  (e.g. float16) variable was requested it will then cast the variable to the
  requested dtype. The reason we do not directly create variables in low
  precision dtypes is that applying small gradients to such variables may
  cause the variable not to change.

  Args:
    getter: The underlying variable getter, that has the same signature as
      tf.get_variable and returns a variable.
    name: The name of the variable to get.
    shape: The shape of the variable to get.
    dtype: The dtype of the variable to get. Note that if this is a low
      precision dtype, the variable will be created as a tf.float32 variable,
      then cast to the appropriate dtype
    *args: Additional arguments to pass unmodified to getter.
    **kwargs: Additional keyword arguments to pass unmodified to getter.

  Returns:
    A variable which is cast to fp16 if necessary.
  """
  if dtype in CASTABLE_TYPES:
    var = getter(name, shape, tf.float32, *args, **kwargs)
    return tf.cast(var, dtype=dtype, name=name + '_cast')
  else:
    return getter(name, shape, dtype, *args, **kwargs)
def step_impl(context):
    data = json.loads(context.text)
    receiver_name = re.match(r"\<(\w+)'s id\>", data['receiver']).group(1)
    data['receiver'] = context.users[receiver_name]['id']
    context.request.add_data(data)
def _model_variable_scope(self):
  """Returns a variable scope that the model should be created under.

  If self.dtype is a castable type, model variable will be created in fp32
  then cast to self.dtype before being used.

  Returns:
    A variable scope for the model.
  """
  return tf.compat.v1.variable_scope(
      'resnet_model',
      custom_getter=self._custom_dtype_getter,
      reuse=tf.AUTO_REUSE)
def step_impl(context, state, code):
    context.response = context.request.send()
    context.response.status_code.should.equal(int(code))
def __call__(self, inputs, training):
  """Add operations to classify a batch of input images.

  Args:
    inputs: A Tensor representing a batch of input images.
    training: A boolean. Set to True to add operations required only when
      training the classifier.

  Returns:
    A logits Tensor with shape [<batch_size>, self.num_classes].
  """
  with self._model_variable_scope():
    if self.data_format == 'channels_first':
      # Convert the inputs from channels_last (NHWC) to channels_first
      # (NCHW). This provides a large performance boost on GPU. See
      # https://www.tensorflow.org/performance/performance_guide#data_formats
      inputs = tf.transpose(a=inputs, perm=[0, 3, 1, 2])

    inputs = conv2d_fixed_padding(
        inputs=inputs, filters=self.num_filters,
        kernel_size=self.kernel_size, strides=self.conv_stride,
        data_format=self.data_format, name='initial_input')
    inputs = tf.identity(inputs, 'initial_conv')

    # We do not include batch normalization or activation functions in V2
    # for the initial conv1 because the first ResNet unit will perform these
    # for both the shortcut and non-shortcut paths as part of the first
    # block's projection. Cf. Appendix of [2].
    if self.resnet_version == 1:
      inputs = batch_norm(inputs, training, self.data_format)
      inputs = tf.nn.relu(inputs)

    if self.first_pool_size:
      inputs = tf.compat.v1.layers.max_pooling2d(
          inputs=inputs, pool_size=self.first_pool_size,
          strides=self.first_pool_stride, padding='SAME',
          data_format=self.data_format)
      inputs = tf.identity(inputs, 'initial_max_pool')

    for i, num_blocks in enumerate(self.block_sizes):
      # We now have 4 block layers, but the last does not
      # double the number of filters.
      # We also skip the projection shortcut in the first block layer.
      num_filters = self.num_filters * min((2**i), 4)
      shortcut = i != 0
      inputs = block_layer(
          inputs=inputs, filters=num_filters, bottleneck=self.bottleneck,
          block_fn=self.block_fn, blocks=num_blocks,
          strides=self.block_strides[i], training=training,
          name='block_layer{}'.format(i + 1),
          data_format=self.data_format, shortcut=shortcut)

    # Skip the last BN+relu.
    # Only apply the BN and ReLU for model that does pre_activation in each
    # building/bottleneck block, eg resnet V2.
    # if self.pre_activation:
    #   inputs = batch_norm(inputs, training, self.data_format,
    #                       name='pre_act'+'batch_norm')
    #   inputs = tf.nn.relu(inputs, name='pre_act'+'relu')

    # The current top layer has shape
    # `batch_size x pool_size x pool_size x final_size`.
    # ResNet does an Average Pooling layer over pool_size,
    # but that is the same as doing a reduce_mean. We do a reduce_mean
    # here because it performs better than AveragePooling2D.
    # Also perform max-pooling, and concat results.
    axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
    avg_pooled = tf.reduce_mean(input_tensor=inputs, axis=axes,
                                keepdims=True)
    avg_pooled = tf.squeeze(avg_pooled, axes)
    max_pooled = tf.reduce_max(input_tensor=inputs, axis=axes,
                               keepdims=True)
    max_pooled = tf.squeeze(max_pooled, axes)
    inputs = tf.concat([avg_pooled, max_pooled], axis=1)
    inputs = tf.identity(inputs, 'final_pooling')

    inputs = tf.compat.v1.layers.dense(
        inputs=inputs, units=self.num_classes, reuse=tf.AUTO_REUSE)
    inputs = tf.identity(inputs, 'final_dense')
    return inputs
def step_impl(context, count):
    cnt = len(context.response.json())
    cnt.should.equal(int(count))

More Information needed

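Judging by the column names and the preview above, each row appears to pair a lower-quality snippet (input) with a rewritten version (target). If so, a minimal preprocessing sketch for sequence-to-sequence fine-tuning might look like the code below, reusing the ds object from the loading sketch; the tokenizer checkpoint and the length limit are assumptions, not part of the card:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # assumed checkpoint

def preprocess(row):
    # Tokenize the "bad" snippet as the encoder input and the "good" snippet
    # as the decoder target; 512 is an assumed maximum length.
    model_inputs = tokenizer(row["input"], truncation=True, max_length=512)
    labels = tokenizer(text_target=row["target"], truncation=True,
                       max_length=512)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

# tokenized = ds.map(preprocess, remove_columns=["input", "target"])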