Columns (name — dtype — min..max value or string length):
Unnamed: 0 — int64 — 0..2.44k
repo — string — lengths 32..81
hash — string — lengths 40..40
diff — string — lengths 113..1.17k
old_path — string — lengths 5..84
rewrite — string — lengths 34..79
initial_state — string — lengths 75..980
final_state — string — lengths 76..980
400
repo: https://:@github.com/dmlc/keras.git
hash: e2e281e14f619d9ade61c46567e2c599db070f16
diff:
@@ -23,7 +23,7 @@ def test_unitnorm_constraint():
     lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
     lookup.train_on_batch(X1, np.array([[1], [0]], dtype='int32'))
-    norm = np.linalg.norm(K.get_value(lookup.params[0]), axis=1)
+    norm = np.linalg.norm(K.get_value(lookup.params[0]), axis=0)
     assert_allclose(norm, np.ones_like(norm).astype('float32'), rtol=1e-05)
old_path: tests/keras/layers/test_embeddings.py
rewrite: ReplaceText(target='0' @(26,62)->(26,63))
initial_state: def test_unitnorm_constraint(): lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary') lookup.train_on_batch(X1, np.array([[1], [0]], dtype='int32')) norm = np.linalg.norm(K.get_value(lookup.params[0]), axis=1) assert_allclose(norm, np.ones_like(norm).astype('float32'), rtol=1e-05)
final_state: def test_unitnorm_constraint(): lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary') lookup.train_on_batch(X1, np.array([[1], [0]], dtype='int32')) norm = np.linalg.norm(K.get_value(lookup.params[0]), axis=0) assert_allclose(norm, np.ones_like(norm).astype('float32'), rtol=1e-05)
401
repo: https://:@github.com/dmlc/keras.git
hash: e2e281e14f619d9ade61c46567e2c599db070f16
diff:
@@ -54,7 +54,7 @@ def test_identity_oddballs():
 def test_unitnorm():
     unitnorm_instance = constraints.unitnorm()
     normalized = unitnorm_instance(K.variable(example_array))
-    norm_of_normalized = np.sqrt(np.sum(K.eval(normalized)**2, axis=1))
+    norm_of_normalized = np.sqrt(np.sum(K.eval(normalized)**2, axis=0))
     # in the unit norm constraint, it should be equal to 1.
     difference = norm_of_normalized - 1.
     largest_difference = np.max(np.abs(difference))
old_path: tests/keras/test_constraints.py
rewrite: ReplaceText(target='0' @(57,68)->(57,69))
initial_state: def test_identity_oddballs(): def test_unitnorm(): unitnorm_instance = constraints.unitnorm() normalized = unitnorm_instance(K.variable(example_array)) norm_of_normalized = np.sqrt(np.sum(K.eval(normalized)**2, axis=1)) # in the unit norm constraint, it should be equal to 1. difference = norm_of_normalized - 1. largest_difference = np.max(np.abs(difference))
final_state: def test_identity_oddballs(): def test_unitnorm(): unitnorm_instance = constraints.unitnorm() normalized = unitnorm_instance(K.variable(example_array)) norm_of_normalized = np.sqrt(np.sum(K.eval(normalized)**2, axis=0)) # in the unit norm constraint, it should be equal to 1. difference = norm_of_normalized - 1. largest_difference = np.max(np.abs(difference))
402
repo: https://:@github.com/dmlc/keras.git
hash: f4af11c7300816ca28b6b707fdf7d64b00430074
diff:
@@ -52,7 +52,7 @@ def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncati
         elif truncating == 'post':
             trunc = s[:maxlen]
         else:
-            raise ValueError("Truncating type '%s' not understood" % padding)
+            raise ValueError("Truncating type '%s' not understood" % truncating)
         # check `trunc` has expected shape
         trunc = np.asarray(trunc, dtype=dtype)
old_path: keras/preprocessing/sequence.py
rewrite: ReplaceText(target='truncating' @(55,69)->(55,76))
initial_state: def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncati elif truncating == 'post': trunc = s[:maxlen] else: raise ValueError("Truncating type '%s' not understood" % padding) # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype)
final_state: def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncati elif truncating == 'post': trunc = s[:maxlen] else: raise ValueError("Truncating type '%s' not understood" % truncating) # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype)
403
repo: https://:@github.com/dmlc/keras.git
hash: b61235b77f87288d62ddd8ce4aae88b76babf887
diff:
@@ -153,7 +153,7 @@ def check_array_lengths(X, Y, W):
         raise Exception('All input arrays (x) should have '
                         'the same number of samples.')
     set_y = set(y_lengths)
-    if len(set_x) != 1:
+    if len(set_y) != 1:
         raise Exception('All target arrays (y) should have '
                         'the same number of samples.')
     set_w = set(w_lengths)
old_path: keras/engine/training.py
rewrite: ReplaceText(target='set_y' @(156,11)->(156,16))
initial_state: def check_array_lengths(X, Y, W): raise Exception('All input arrays (x) should have ' 'the same number of samples.') set_y = set(y_lengths) if len(set_x) != 1: raise Exception('All target arrays (y) should have ' 'the same number of samples.') set_w = set(w_lengths)
final_state: def check_array_lengths(X, Y, W): raise Exception('All input arrays (x) should have ' 'the same number of samples.') set_y = set(y_lengths) if len(set_y) != 1: raise Exception('All target arrays (y) should have ' 'the same number of samples.') set_w = set(w_lengths)
404
repo: https://:@github.com/dmlc/keras.git
hash: 98974efa5f51d6f55afbf2bc125d6fd090bcf782
diff:
@@ -510,7 +510,7 @@ class Model(Container):
                                 'it should have one entry per model outputs. '
                                 'The model has ' + str(len(self.outputs)) +
                                 ' outputs, but you passed loss_weights=' +
-                                str(loss))
+                                str(loss_weights))
             loss_weights_list = loss_weights
         else:
             raise Exception('Could not interpret loss_weights argument: ' +
old_path: keras/engine/training.py
rewrite: ReplaceText(target='loss_weights' @(513,36)->(513,40))
initial_state: class Model(Container): 'it should have one entry per model outputs. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss_weights=' + str(loss)) loss_weights_list = loss_weights else: raise Exception('Could not interpret loss_weights argument: ' +
final_state: class Model(Container): 'it should have one entry per model outputs. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss_weights=' + str(loss_weights)) loss_weights_list = loss_weights else: raise Exception('Could not interpret loss_weights argument: ' +
405
repo: https://:@github.com/dmlc/keras.git
hash: 48ae7217e482a1a3624d6e5380c972a653cacfaf
diff:
@@ -1136,7 +1136,7 @@ def rnn(step_function, inputs, initial_states,
     if mask is not None:
         if go_backwards:
-            mask = tf.reverse(mask, [True] + [False] * (ndim - 1))
+            mask = tf.reverse(mask, [True] + [False] * (ndim - 2))
         # Transpose not supported by bool tensor types, hence round-trip to uint8.
         mask = tf.cast(mask, tf.uint8)
old_path: keras/backend/tensorflow_backend.py
rewrite: ReplaceText(target='2' @(1139,67)->(1139,68))
initial_state: def rnn(step_function, inputs, initial_states, if mask is not None: if go_backwards: mask = tf.reverse(mask, [True] + [False] * (ndim - 1)) # Transpose not supported by bool tensor types, hence round-trip to uint8. mask = tf.cast(mask, tf.uint8)
final_state: def rnn(step_function, inputs, initial_states, if mask is not None: if go_backwards: mask = tf.reverse(mask, [True] + [False] * (ndim - 2)) # Transpose not supported by bool tensor types, hence round-trip to uint8. mask = tf.cast(mask, tf.uint8)
406
repo: https://:@github.com/dmlc/keras.git
hash: 41741c38e5f29ebf69fe9bd82a604eba3c0b97e5
diff:
@@ -1261,7 +1261,7 @@ def rnn(step_function, inputs, initial_states,
                 new_state = new_states[0]
             else:
                 # return dummy state, otherwise _dynamic_rnn_loop breaks
-                new_state = output
+                new_state = state
             return output, new_state
         _step.state_size = state_size * nb_states
old_path: keras/backend/tensorflow_backend.py
rewrite: ReplaceText(target='state' @(1264,32)->(1264,38))
initial_state: def rnn(step_function, inputs, initial_states, new_state = new_states[0] else: # return dummy state, otherwise _dynamic_rnn_loop breaks new_state = output return output, new_state _step.state_size = state_size * nb_states
final_state: def rnn(step_function, inputs, initial_states, new_state = new_states[0] else: # return dummy state, otherwise _dynamic_rnn_loop breaks new_state = state return output, new_state _step.state_size = state_size * nb_states
407
repo: https://:@github.com/dmlc/keras.git
hash: 80fbbc3a6a2a30f391bad2aa85e7558c50ca0709
diff:
@@ -411,7 +411,7 @@ class ImageDataGenerator(object):
         if self.zca_whitening:
             flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
-            sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
+            sigma = np.dot(flatX.T, flatX) / flatX.shape[0]
             U, S, V = linalg.svd(sigma)
             self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
old_path: keras/preprocessing/image.py
rewrite: ReplaceText(target='0' @(414,57)->(414,58))
initial_state: class ImageDataGenerator(object): if self.zca_whitening: flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3])) sigma = np.dot(flatX.T, flatX) / flatX.shape[1] U, S, V = linalg.svd(sigma) self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
final_state: class ImageDataGenerator(object): if self.zca_whitening: flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3])) sigma = np.dot(flatX.T, flatX) / flatX.shape[0] U, S, V = linalg.svd(sigma) self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
408
repo: https://:@github.com/dmlc/keras.git
hash: 7bd5c862a271f125a76fa1ada7f0d9ae27159549
diff:
@@ -209,7 +209,7 @@ def check_loss_and_target_compatibility(targets, losses, output_shapes):
                             'which does expect integer targets.')
         if loss.__name__ in key_losses:
             for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
-                if target_dim is not None and target_dim != out_dim:
+                if out_dim is not None and target_dim != out_dim:
                     raise Exception('A target array with shape ' + str(y.shape) +
                                     ' was passed for an output of shape ' + str(shape) +
                                     ' while using as loss `' + loss.__name__ + '`. '
old_path: keras/engine/training.py
rewrite: ReplaceText(target='out_dim' @(212,19)->(212,29))
initial_state: def check_loss_and_target_compatibility(targets, losses, output_shapes): 'which does expect integer targets.') if loss.__name__ in key_losses: for target_dim, out_dim in zip(y.shape[1:], shape[1:]): if target_dim is not None and target_dim != out_dim: raise Exception('A target array with shape ' + str(y.shape) + ' was passed for an output of shape ' + str(shape) + ' while using as loss `' + loss.__name__ + '`. '
final_state: def check_loss_and_target_compatibility(targets, losses, output_shapes): 'which does expect integer targets.') if loss.__name__ in key_losses: for target_dim, out_dim in zip(y.shape[1:], shape[1:]): if out_dim is not None and target_dim != out_dim: raise Exception('A target array with shape ' + str(y.shape) + ' was passed for an output of shape ' + str(shape) + ' while using as loss `' + loss.__name__ + '`. '
409
repo: https://:@github.com/dmlc/keras.git
hash: cb4f93913eb871a5e234db0c31f885daff87ecdf
diff:
@@ -67,7 +67,7 @@ def _obtain_input_shape(input_shape, default_size, min_size, dim_ordering, inclu
     if input_shape is not None:
         if len(input_shape) != 3:
             raise ValueError('`input_shape` must be a tuple of three integers.')
-        if input_shape[1] != 3:
+        if input_shape[0] != 3:
             raise ValueError('The input must have 3 channels; got '
                              '`input_shape=' + str(input_shape) + '`')
         if ((input_shape[1] is not None and input_shape[1] < min_size) or
old_path: keras/applications/imagenet_utils.py
rewrite: ReplaceText(target='0' @(70,31)->(70,32))
initial_state: def _obtain_input_shape(input_shape, default_size, min_size, dim_ordering, inclu if input_shape is not None: if len(input_shape) != 3: raise ValueError('`input_shape` must be a tuple of three integers.') if input_shape[1] != 3: raise ValueError('The input must have 3 channels; got ' '`input_shape=' + str(input_shape) + '`') if ((input_shape[1] is not None and input_shape[1] < min_size) or
final_state: def _obtain_input_shape(input_shape, default_size, min_size, dim_ordering, inclu if input_shape is not None: if len(input_shape) != 3: raise ValueError('`input_shape` must be a tuple of three integers.') if input_shape[0] != 3: raise ValueError('The input must have 3 channels; got ' '`input_shape=' + str(input_shape) + '`') if ((input_shape[1] is not None and input_shape[1] < min_size) or
410
repo: https://:@github.com/dmlc/keras.git
hash: 82ca6d418588ccd61d663ec8029937290b62d583
diff:
@@ -120,7 +120,7 @@ X = X[indices]
 y = y[indices]
 # Explicitly set apart 10% for validation data that we never train over
-split_at = len(X) - len(X) / 10
+split_at = len(X) - len(X) // 10
 (X_train, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at))
 (y_train, y_val) = (y[:split_at], y[split_at:])
old_path: examples/addition_rnn.py
rewrite: ReplaceText(target='//' @(123,27)->(123,28))
initial_state: X = X[indices] y = y[indices] # Explicitly set apart 10% for validation data that we never train over split_at = len(X) - len(X) / 10 (X_train, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at)) (y_train, y_val) = (y[:split_at], y[split_at:])
final_state: X = X[indices] y = y[indices] # Explicitly set apart 10% for validation data that we never train over split_at = len(X) - len(X) // 10 (X_train, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at)) (y_train, y_val) = (y[:split_at], y[split_at:])
411
repo: https://:@github.com/dmlc/keras.git
hash: 7c34add25d0f6a773f1c74d1d8bb50f4482afc76
diff:
@@ -1875,7 +1875,7 @@ def set_value(x, value):
     """Sets the value of a variable, from a Numpy array.
     It returns `None`.
     """
-    if isinstance(x, Number):
+    if isinstance(value, Number):
         value = [value]
     x.bind(mx.nd.array(value))
old_path: keras/backend/mxnet_backend.py
rewrite: ReplaceText(target='value' @(1878,18)->(1878,19))
initial_state: def set_value(x, value): """Sets the value of a variable, from a Numpy array. It returns `None`. """ if isinstance(x, Number): value = [value] x.bind(mx.nd.array(value))
final_state: def set_value(x, value): """Sets the value of a variable, from a Numpy array. It returns `None`. """ if isinstance(value, Number): value = [value] x.bind(mx.nd.array(value))
412
repo: https://:@github.com/incountry/sdk-python.git
hash: e98e4243dae300a5ae9cab87347fa61bb87b51a9
diff:
@@ -28,6 +28,6 @@ storage = Storage(
 while not migration_complete:
     migration_res = storage.migrate(country=COUNTRY, limit=50)
-    if migration_res["total_left"] <= 0:
+    if migration_res["total_left"] == 0:
         migration_complete = True
     time.sleep(1)
old_path: examples/full_migration.py
rewrite: ReplaceText(target='==' @(31,35)->(31,37))
initial_state: storage = Storage( while not migration_complete: migration_res = storage.migrate(country=COUNTRY, limit=50) if migration_res["total_left"] <= 0: migration_complete = True time.sleep(1)
final_state: storage = Storage( while not migration_complete: migration_res = storage.migrate(country=COUNTRY, limit=50) if migration_res["total_left"] == 0: migration_complete = True time.sleep(1)
413
repo: https://:@github.com/knockrentals/scrapy-elasticsearch.git
hash: 1c5c20459e68544eeb8a0490b9f7b14895861b24
diff:
@@ -96,7 +96,7 @@ class ElasticSearchPipeline(object):
         self.items_buffer.append(index_action)
-        if len(self.items_buffer) == self.settings.get('ELASTICSEARCH_BUFFER_LENGTH', 500):
+        if len(self.items_buffer) >= self.settings.get('ELASTICSEARCH_BUFFER_LENGTH', 500):
             self.send_items()
             self.items_buffer = []
old_path: scrapyelasticsearch/scrapyelasticsearch.py
rewrite: ReplaceText(target='>=' @(99,34)->(99,36))
initial_state: class ElasticSearchPipeline(object): self.items_buffer.append(index_action) if len(self.items_buffer) == self.settings.get('ELASTICSEARCH_BUFFER_LENGTH', 500): self.send_items() self.items_buffer = []
final_state: class ElasticSearchPipeline(object): self.items_buffer.append(index_action) if len(self.items_buffer) >= self.settings.get('ELASTICSEARCH_BUFFER_LENGTH', 500): self.send_items() self.items_buffer = []
414
repo: https://:@github.com/smithlabcode/ribotricer.git
hash: 805255ef81f4c94dcaf9f0c63ba39f0de7f14eea
diff:
@@ -149,7 +149,7 @@ def detect_orfs_cmd(bam, ribocop_index, prefix, stranded, read_lengths,
         sys.exit('Error: cannot convert psite_offsets into integers')
     if len(read_lengths) != len(psite_offsets):
         sys.exit('Error: psite_offsets must match read_lengths')
-    if not all(x > 0 for x in psite_offsets):
+    if not all(x >= 0 for x in psite_offsets):
         sys.exit('Error: P-site offset must be >= 0')
     if not all(x > y for (x, y) in zip(read_lengths, psite_offsets)):
         sys.exit('Error: P-site offset must be smaller than read length')
old_path: RiboCop/cli.py
rewrite: ReplaceText(target='>=' @(152,21)->(152,22))
initial_state: def detect_orfs_cmd(bam, ribocop_index, prefix, stranded, read_lengths, sys.exit('Error: cannot convert psite_offsets into integers') if len(read_lengths) != len(psite_offsets): sys.exit('Error: psite_offsets must match read_lengths') if not all(x > 0 for x in psite_offsets): sys.exit('Error: P-site offset must be >= 0') if not all(x > y for (x, y) in zip(read_lengths, psite_offsets)): sys.exit('Error: P-site offset must be smaller than read length')
final_state: def detect_orfs_cmd(bam, ribocop_index, prefix, stranded, read_lengths, sys.exit('Error: cannot convert psite_offsets into integers') if len(read_lengths) != len(psite_offsets): sys.exit('Error: psite_offsets must match read_lengths') if not all(x >= 0 for x in psite_offsets): sys.exit('Error: P-site offset must be >= 0') if not all(x > y for (x, y) in zip(read_lengths, psite_offsets)): sys.exit('Error: P-site offset must be smaller than read length')
415
repo: https://:@github.com/smithlabcode/ribotricer.git
hash: 9e9c9497eb5c2669bd272729e19f47e2cbf2b3db
diff:
@@ -321,7 +321,7 @@ def prepare_orfs(gtf, fasta, prefix, min_orf_length, start_codons,
     for orf in tqdm(candidate_orfs):
         coordinate = ','.join(
             ['{}-{}'.format(iv.start, iv.end) for iv in orf.intervals])
-        to_write = formatter.format(orf.oid, orf.category, orf.tid, orf.ttype,
+        to_write += formatter.format(orf.oid, orf.category, orf.tid, orf.ttype,
                                     orf.gid, orf.gname, orf.gtype, orf.chrom,
                                     orf.strand, coordinate)
old_path: RiboCop/prepare_orfs.py
rewrite: ReplaceText(target='+=' @(324,17)->(324,18))
initial_state: def prepare_orfs(gtf, fasta, prefix, min_orf_length, start_codons, for orf in tqdm(candidate_orfs): coordinate = ','.join( ['{}-{}'.format(iv.start, iv.end) for iv in orf.intervals]) to_write = formatter.format(orf.oid, orf.category, orf.tid, orf.ttype, orf.gid, orf.gname, orf.gtype, orf.chrom, orf.strand, coordinate)
final_state: def prepare_orfs(gtf, fasta, prefix, min_orf_length, start_codons, for orf in tqdm(candidate_orfs): coordinate = ','.join( ['{}-{}'.format(iv.start, iv.end) for iv in orf.intervals]) to_write += formatter.format(orf.oid, orf.category, orf.tid, orf.ttype, orf.gid, orf.gname, orf.gtype, orf.chrom, orf.strand, coordinate)
416
repo: https://:@github.com/pughlab/ConsensusCruncher.git
hash: 3622410b893b07afb4e423a4537eab33da26a55d
diff:
@@ -147,7 +147,7 @@ def main():
     sscs_bam = pysam.AlignmentFile(args.infile, "rb")
     dcs_bam = pysam.AlignmentFile(args.outfile, "wb", template=sscs_bam)
-    if re.search('dcs.sc', args.outfile) is None:
+    if re.search('dcs.sc', args.outfile) is not None:
         sscs_singleton_bam = pysam.AlignmentFile('{}.sscs.sc.singleton.bam'.format(args.outfile.split('.dcs.sc')[0]), "wb", template=sscs_bam)
         dcs_header = "DCS - Singleton Correction"
old_path: ConsensusCruncher/DCS_maker.py
rewrite: ReplaceText(target=' is not ' @(150,40)->(150,44))
initial_state: def main(): sscs_bam = pysam.AlignmentFile(args.infile, "rb") dcs_bam = pysam.AlignmentFile(args.outfile, "wb", template=sscs_bam) if re.search('dcs.sc', args.outfile) is None: sscs_singleton_bam = pysam.AlignmentFile('{}.sscs.sc.singleton.bam'.format(args.outfile.split('.dcs.sc')[0]), "wb", template=sscs_bam) dcs_header = "DCS - Singleton Correction"
final_state: def main(): sscs_bam = pysam.AlignmentFile(args.infile, "rb") dcs_bam = pysam.AlignmentFile(args.outfile, "wb", template=sscs_bam) if re.search('dcs.sc', args.outfile) is not None: sscs_singleton_bam = pysam.AlignmentFile('{}.sscs.sc.singleton.bam'.format(args.outfile.split('.dcs.sc')[0]), "wb", template=sscs_bam) dcs_header = "DCS - Singleton Correction"
417
repo: https://:@github.com/django-guardian/django-guardian.git
hash: f60306eb93fd276879806d6e78557bdb5d1ce34f
diff:
@@ -172,7 +172,7 @@ def get_obj_perms_model(obj, base_cls, generic_cls):
     for attr in fields:
         model = getattr(attr, 'related_model', None)
         if (model and issubclass(model, base_cls) and
-                model is not generic_cls and getattr(attr, 'enabled', True)):
+                model is not generic_cls and getattr(model, 'enabled', True)):
             # if model is generic one it would be returned anyway
             if not model.objects.is_generic():
                 # make sure that content_object's content_type is same as
old_path: guardian/utils.py
rewrite: ReplaceText(target='model' @(175,53)->(175,57))
initial_state: def get_obj_perms_model(obj, base_cls, generic_cls): for attr in fields: model = getattr(attr, 'related_model', None) if (model and issubclass(model, base_cls) and model is not generic_cls and getattr(attr, 'enabled', True)): # if model is generic one it would be returned anyway if not model.objects.is_generic(): # make sure that content_object's content_type is same as
final_state: def get_obj_perms_model(obj, base_cls, generic_cls): for attr in fields: model = getattr(attr, 'related_model', None) if (model and issubclass(model, base_cls) and model is not generic_cls and getattr(model, 'enabled', True)): # if model is generic one it would be returned anyway if not model.objects.is_generic(): # make sure that content_object's content_type is same as
418
repo: https://:@github.com/podhmo/magicalimport.git
hash: 293f619fee3f401ebe0daf55f001354ecf2a2124
diff:
@@ -69,7 +69,7 @@ def import_symbol(sym, here=None, sep=":", ns=None):
         sym = "{}:{}".format(ns, sym)
     module_path, fn_name = sym.rsplit(sep, 2)
     try:
-        module = import_module(sym, here=here, sep=sep)
+        module = import_module(module_path, here=here, sep=sep)
         return getattr(module, fn_name)
     except (ImportError, AttributeError) as e:
         sys.stderr.write("could not import {!r}\n{}\n".format(sym, e))
old_path: magicalimport/__init__.py
rewrite: ReplaceText(target='module_path' @(72,31)->(72,34))
initial_state: def import_symbol(sym, here=None, sep=":", ns=None): sym = "{}:{}".format(ns, sym) module_path, fn_name = sym.rsplit(sep, 2) try: module = import_module(sym, here=here, sep=sep) return getattr(module, fn_name) except (ImportError, AttributeError) as e: sys.stderr.write("could not import {!r}\n{}\n".format(sym, e))
final_state: def import_symbol(sym, here=None, sep=":", ns=None): sym = "{}:{}".format(ns, sym) module_path, fn_name = sym.rsplit(sep, 2) try: module = import_module(module_path, here=here, sep=sep) return getattr(module, fn_name) except (ImportError, AttributeError) as e: sys.stderr.write("could not import {!r}\n{}\n".format(sym, e))
419
repo: https://:@github.com/sigmavirus24/betamax.git
hash: 9d84fcffbdf41133dbdd686490c993d63e0243fc
diff:
@@ -112,7 +112,7 @@ def deserialize_response(serialized):
     for header_name, header_list in serialized['headers'].items():
         if isinstance(header_list, list):
             for header_value in header_list:
-                header_dict.add(header_name, header_list)
+                header_dict.add(header_name, header_value)
         else:
             header_dict.add(header_name, header_list)
     r.headers = CaseInsensitiveDict(header_dict)
old_path: betamax/cassette/util.py
rewrite: ReplaceText(target='header_value' @(115,45)->(115,56))
initial_state: def deserialize_response(serialized): for header_name, header_list in serialized['headers'].items(): if isinstance(header_list, list): for header_value in header_list: header_dict.add(header_name, header_list) else: header_dict.add(header_name, header_list) r.headers = CaseInsensitiveDict(header_dict)
final_state: def deserialize_response(serialized): for header_name, header_list in serialized['headers'].items(): if isinstance(header_list, list): for header_value in header_list: header_dict.add(header_name, header_value) else: header_dict.add(header_name, header_list) r.headers = CaseInsensitiveDict(header_dict)
420
repo: https://:@github.com/CodyKochmann/generators.git
hash: 0c99248a9a96a675d6995855c4a9ae0efebef329
diff:
@@ -17,7 +17,7 @@ def itemgetter(iterable, indexes):
         for i,x in enumerate(iterable):
             if i in positive_indexes:
                 out[i]=x
-            negative_index_buffer.append(i)
+            negative_index_buffer.append(x)
         out.update({ni:negative_index_buffer[ni] for ni in negative_indexes})
     else: # if just positive results
old_path: generators/itemgetter.py
rewrite: ReplaceText(target='x' @(20,41)->(20,42))
initial_state: def itemgetter(iterable, indexes): for i,x in enumerate(iterable): if i in positive_indexes: out[i]=x negative_index_buffer.append(i) out.update({ni:negative_index_buffer[ni] for ni in negative_indexes}) else: # if just positive results
final_state: def itemgetter(iterable, indexes): for i,x in enumerate(iterable): if i in positive_indexes: out[i]=x negative_index_buffer.append(x) out.update({ni:negative_index_buffer[ni] for ni in negative_indexes}) else: # if just positive results
421
repo: https://:@github.com/reiinakano/xcessiv.git
hash: b197e370a6f8a46f6ba3e9b77fb76f150f28edd5
diff:
@@ -146,7 +146,7 @@ def evaluate_stacked_ensemble(path, ensemble_id):
         )
         preds = []
         trues_list = []
-        for train_index, test_index in cv.split(X, y):
+        for train_index, test_index in cv.split(secondary_features, y):
             X_train, X_test = secondary_features[train_index], secondary_features[test_index]
             y_train, y_test = y[train_index], y[test_index]
             est = est.fit(X_train, y_train)
old_path: xcessiv/rqtasks.py
rewrite: ReplaceText(target='secondary_features' @(149,52)->(149,53))
initial_state: def evaluate_stacked_ensemble(path, ensemble_id): ) preds = [] trues_list = [] for train_index, test_index in cv.split(X, y): X_train, X_test = secondary_features[train_index], secondary_features[test_index] y_train, y_test = y[train_index], y[test_index] est = est.fit(X_train, y_train)
final_state: def evaluate_stacked_ensemble(path, ensemble_id): ) preds = [] trues_list = [] for train_index, test_index in cv.split(secondary_features, y): X_train, X_test = secondary_features[train_index], secondary_features[test_index] y_train, y_test = y[train_index], y[test_index] est = est.fit(X_train, y_train)
422
repo: https://:@github.com/MosesofEgypt/mozzarilla.git
hash: 5c0f1e29111be71edbb67ef13094a0cdc83d760c
diff:
@@ -684,7 +684,7 @@ def _compile_model_animations(self):
             print(error)
         self.update()
-        if messagebox.askyesno(
+        if not messagebox.askyesno(
                 "Model_animations compilation failed",
                 "Errors occurred while compiling animations(check console). "
                 "Do you want to save the model_animations tag anyway?",
old_path: mozzarilla/tools/animations_compiler_window.py
rewrite: ReplaceText(target='not ' @(687,15)->(687,15))
initial_state: def _compile_model_animations(self): print(error) self.update() if messagebox.askyesno( "Model_animations compilation failed", "Errors occurred while compiling animations(check console). " "Do you want to save the model_animations tag anyway?",
final_state: def _compile_model_animations(self): print(error) self.update() if not messagebox.askyesno( "Model_animations compilation failed", "Errors occurred while compiling animations(check console). " "Do you want to save the model_animations tag anyway?",
423
repo: https://:@github.com/HeeroYui/lutin.git
hash: 9fc593fb59a192ddf5f50a96e2a6cba76dab73b6
diff:
@@ -23,7 +23,7 @@ class System(system.System):
         # no check needed ==> just add this:
         self.add_module_depend(['c'])
         self.add_export_flag('link-lib', 'X11')
-        if env.get_isolate_system() == False:
+        if env.get_isolate_system() == True:
             self.add_header_file([
                 "/usr/include/X11/*"
                 ],
old_path: lutin/z_system/lutinSystem_Linux_X11.py
rewrite: ReplaceText(target='True' @(26,33)->(26,38))
initial_state: class System(system.System): # no check needed ==> just add this: self.add_module_depend(['c']) self.add_export_flag('link-lib', 'X11') if env.get_isolate_system() == False: self.add_header_file([ "/usr/include/X11/*" ],
final_state: class System(system.System): # no check needed ==> just add this: self.add_module_depend(['c']) self.add_export_flag('link-lib', 'X11') if env.get_isolate_system() == True: self.add_header_file([ "/usr/include/X11/*" ],
424
repo: https://:@github.com/sanger-pathogens/ariba.git
hash: 8625628cf307e533bb6e778d9b8e936e48cef727
diff:
@@ -294,7 +294,7 @@ class ReferenceData:
     def sanity_check(self, outprefix):
         variants_only_removed = self._remove_bad_genes(self.seq_dicts['variants_only'], outprefix + '.00.check_fasta_variants_only.log')
         presence_absence_removed = self._remove_bad_genes(self.seq_dicts['presence_absence'], outprefix + '.00.check_fasta_presence_absence.log')
-        self._filter_bad_variant_data(outprefix + '.01.check_variants', variants_only_removed, presence_absence_removed)
+        self._filter_bad_variant_data(outprefix + '.01.check_variants', presence_absence_removed, variants_only_removed)
     @classmethod
old_path: ariba/reference_data.py
rewrite: ArgSwap(idxs=1<->2 @(297,8)->(297,37))
initial_state: class ReferenceData: def sanity_check(self, outprefix): variants_only_removed = self._remove_bad_genes(self.seq_dicts['variants_only'], outprefix + '.00.check_fasta_variants_only.log') presence_absence_removed = self._remove_bad_genes(self.seq_dicts['presence_absence'], outprefix + '.00.check_fasta_presence_absence.log') self._filter_bad_variant_data(outprefix + '.01.check_variants', variants_only_removed, presence_absence_removed) @classmethod
final_state: class ReferenceData: def sanity_check(self, outprefix): variants_only_removed = self._remove_bad_genes(self.seq_dicts['variants_only'], outprefix + '.00.check_fasta_variants_only.log') presence_absence_removed = self._remove_bad_genes(self.seq_dicts['presence_absence'], outprefix + '.00.check_fasta_presence_absence.log') self._filter_bad_variant_data(outprefix + '.01.check_variants', presence_absence_removed, variants_only_removed) @classmethod
425
repo: https://:@github.com/sanger-pathogens/ariba.git
hash: c70bc90299a1c5a85f20127ac8c750925219316b
diff:
@@ -192,7 +192,7 @@ class Summary:
                     if self.show_known_het and (cluster, variant) in all_het_snps:
                         rows[filename][cluster][key + '.%'] = 'NA'
-                    if self.show_known_het and (ref_name, variant) in all_het_snps and key + '.%' not in rows[filename][cluster]:
+                    if self.show_known_het and (cluster, variant) in all_het_snps and key + '.%' not in rows[filename][cluster]:
                         rows[filename][cluster][key + '.%'] = 'NA'
             for key, wanted in self.cluster_columns.items():
old_path: ariba/summary.py
rewrite: ReplaceText(target='cluster' @(195,52)->(195,60))
initial_state: class Summary: if self.show_known_het and (cluster, variant) in all_het_snps: rows[filename][cluster][key + '.%'] = 'NA' if self.show_known_het and (ref_name, variant) in all_het_snps and key + '.%' not in rows[filename][cluster]: rows[filename][cluster][key + '.%'] = 'NA' for key, wanted in self.cluster_columns.items():
final_state: class Summary: if self.show_known_het and (cluster, variant) in all_het_snps: rows[filename][cluster][key + '.%'] = 'NA' if self.show_known_het and (cluster, variant) in all_het_snps and key + '.%' not in rows[filename][cluster]: rows[filename][cluster][key + '.%'] = 'NA' for key, wanted in self.cluster_columns.items():
426
repo: https://:@github.com/sanger-pathogens/ariba.git
hash: 1fd2c639e7b24a69252390744ae4e1a9e49db5dd
diff:
@@ -47,7 +47,7 @@ class MlstReporter:
             depths = [int(x) for x in d['smtls_nts_depth'].split(',')]
             depths.sort()
             het_pc = round(100.0 * depths[-1] / sum(depths), 2)
-            if results['hetmin'] == '.' or results['hetmin'] < het_pc:
+            if results['hetmin'] == '.' or results['hetmin'] > het_pc:
                 results['hetmin'] = het_pc
         if len(het_data):
             results['hets'] = '.'.join(het_data)
old_path: ariba/mlst_reporter.py
rewrite: ReplaceText(target='>' @(50,69)->(50,70))
initial_state: class MlstReporter: depths = [int(x) for x in d['smtls_nts_depth'].split(',')] depths.sort() het_pc = round(100.0 * depths[-1] / sum(depths), 2) if results['hetmin'] == '.' or results['hetmin'] < het_pc: results['hetmin'] = het_pc if len(het_data): results['hets'] = '.'.join(het_data)
final_state: class MlstReporter: depths = [int(x) for x in d['smtls_nts_depth'].split(',')] depths.sort() het_pc = round(100.0 * depths[-1] / sum(depths), 2) if results['hetmin'] == '.' or results['hetmin'] > het_pc: results['hetmin'] = het_pc if len(het_data): results['hets'] = '.'.join(het_data)
427
repo: https://:@github.com/urschrei/pyzotero.git
hash: cdfd191116363c947fc0a0d0b4f37849d709f9f2
diff:
@@ -40,7 +40,7 @@ def check():
     return library_version == git_version
 if __name__ == '__main__':
-    if check():
+    if not check():
         sys.exit(1)
     else:
         sys.exit(0)
old_path: pre-deploy.py
rewrite: ReplaceText(target='not ' @(43,7)->(43,7))
initial_state: def check(): return library_version == git_version if __name__ == '__main__': if check(): sys.exit(1) else: sys.exit(0)
final_state: def check(): return library_version == git_version if __name__ == '__main__': if not check(): sys.exit(1) else: sys.exit(0)
428
repo: https://:@github.com/lyft/confidant.git
hash: 5de06bb144ad392dba5ef9c75603eb9587dfcfe3
diff:
@@ -410,7 +410,7 @@ def update_credential(id):
         include_credential_pairs=True,
     )
     credential_response.permissions = permissions
-    return credential_response_schema.dumps(permissions)
+    return credential_response_schema.dumps(credential_response)
 @blueprint.route('/v1/credentials/<id>/<to_revision>', methods=['PUT'])
old_path: confidant/routes/credentials.py
rewrite: ReplaceText(target='credential_response' @(413,44)->(413,55))
initial_state: def update_credential(id): include_credential_pairs=True, ) credential_response.permissions = permissions return credential_response_schema.dumps(permissions) @blueprint.route('/v1/credentials/<id>/<to_revision>', methods=['PUT'])
final_state: def update_credential(id): include_credential_pairs=True, ) credential_response.permissions = permissions return credential_response_schema.dumps(credential_response) @blueprint.route('/v1/credentials/<id>/<to_revision>', methods=['PUT'])
429
repo: https://:@github.com/cloudenvy/cloudenvy.git
hash: a19f6f84832b1dfddbf5ad7b1e84790842b22712
diff:
@@ -35,7 +35,7 @@ class Files(cloudenvy.envy.Command):
             logging.info("Copying file from '%s' to '%s'", local_path,
                          remote_path)
-            if os.path.exists(local_path):
+            if not os.path.exists(local_path):
                 logging.error("Local file '%s' not found.", local_path)
             dest_dir = _parse_directory(remote_path)
old_path: cloudenvy/commands/files.py
rewrite: ReplaceText(target='not ' @(38,23)->(38,23))
initial_state: class Files(cloudenvy.envy.Command): logging.info("Copying file from '%s' to '%s'", local_path, remote_path) if os.path.exists(local_path): logging.error("Local file '%s' not found.", local_path) dest_dir = _parse_directory(remote_path)
final_state: class Files(cloudenvy.envy.Command): logging.info("Copying file from '%s' to '%s'", local_path, remote_path) if not os.path.exists(local_path): logging.error("Local file '%s' not found.", local_path) dest_dir = _parse_directory(remote_path)
430
repo: https://:@github.com/WeiXuanChan/autoD.git
hash: e163474f70ed6a02f39cd6edaa298271e5f23327
diff:
@@ -520,7 +520,7 @@ class Imaginary(AD):
 class Absolute(AD):
     def __init__(self,func):
         self.func=func
-        self.abs=(Real(func)**2.-Imaginary(func)**2.)**0.5
+        self.abs=(Real(func)**2.+Imaginary(func)**2.)**0.5
         try:
             self.dependent=func.dependent[:]
         except AttributeError:
old_path: autoD.py
rewrite: ReplaceText(target='+' @(523,32)->(523,33))
initial_state: class Imaginary(AD): class Absolute(AD): def __init__(self,func): self.func=func self.abs=(Real(func)**2.-Imaginary(func)**2.)**0.5 try: self.dependent=func.dependent[:] except AttributeError:
final_state: class Imaginary(AD): class Absolute(AD): def __init__(self,func): self.func=func self.abs=(Real(func)**2.+Imaginary(func)**2.)**0.5 try: self.dependent=func.dependent[:] except AttributeError:
431
repo: https://:@github.com/dwavesystems/dimod.git
hash: 79979454139757bd49c1e31c67d890c1d5efeee2
diff:
@@ -279,7 +279,7 @@ class PolyScaleComposite(ComposedPolySampler):
         # we need to know how much we scaled by, which we can do by looking
         # at the biases
         try:
-            v = next((v for v, bias in poly.items() if bias))
+            v = next((v for v, bias in original.items() if bias))
         except StopIteration:
             # nothing to scale
             scalar = 1
old_path: dimod/reference/composites/higherordercomposites.py
rewrite: ReplaceText(target='original' @(282,43)->(282,47))
initial_state: class PolyScaleComposite(ComposedPolySampler): # we need to know how much we scaled by, which we can do by looking # at the biases try: v = next((v for v, bias in poly.items() if bias)) except StopIteration: # nothing to scale scalar = 1
final_state: class PolyScaleComposite(ComposedPolySampler): # we need to know how much we scaled by, which we can do by looking # at the biases try: v = next((v for v, bias in original.items() if bias)) except StopIteration: # nothing to scale scalar = 1
432
repo: https://:@github.com/dwavesystems/dimod.git
hash: c21ee99ab65a519b822689361fcfcc66ffb890f2
diff:
@@ -2146,7 +2146,7 @@ class TestSerialization(unittest.TestCase):
         new = dimod.BinaryQuadraticModel.from_serializable(bqm.to_serializable(use_bytes=True))
         self.assertEqual(bqm, new)
-        self.assertEqual(bqm.info, {"tag": 5})
+        self.assertEqual(new.info, {"tag": 5})
 class TestZeroField(unittest.TestCase):
old_path: tests/test_binary_quadratic_model.py
rewrite: ReplaceText(target='new' @(2149,25)->(2149,28))
initial_state: class TestSerialization(unittest.TestCase): new = dimod.BinaryQuadraticModel.from_serializable(bqm.to_serializable(use_bytes=True)) self.assertEqual(bqm, new) self.assertEqual(bqm.info, {"tag": 5}) class TestZeroField(unittest.TestCase):
final_state: class TestSerialization(unittest.TestCase): new = dimod.BinaryQuadraticModel.from_serializable(bqm.to_serializable(use_bytes=True)) self.assertEqual(bqm, new) self.assertEqual(new.info, {"tag": 5}) class TestZeroField(unittest.TestCase):
433
repo: https://:@github.com/dwavesystems/dimod.git
hash: ceee47e049c2c3305d459c6ae865a430dbd113e9
diff:
@@ -197,7 +197,7 @@ def ran_r(r, graph, cls=BinaryQuadraticModel, seed=None):
     rvals = np.empty(2*r)
     rvals[0:r] = range(-r, 0)
     rvals[r:] = range(1, r+1)
-    qdata = rnd.choice(rvals, size=len(variables))
+    qdata = rnd.choice(rvals, size=len(irow))
     offset = 0
old_path: dimod/generators/random.py
rewrite: ReplaceText(target='irow' @(200,39)->(200,48))
initial_state: def ran_r(r, graph, cls=BinaryQuadraticModel, seed=None): rvals = np.empty(2*r) rvals[0:r] = range(-r, 0) rvals[r:] = range(1, r+1) qdata = rnd.choice(rvals, size=len(variables)) offset = 0
final_state: def ran_r(r, graph, cls=BinaryQuadraticModel, seed=None): rvals = np.empty(2*r) rvals[0:r] = range(-r, 0) rvals[r:] = range(1, r+1) qdata = rnd.choice(rvals, size=len(irow)) offset = 0
434
repo: https://:@github.com/dvdotsenko/jsonrpc.py.git
hash: 92ad90db194c878cb2023e97758671d72c976797
diff:
@@ -75,7 +75,7 @@ class JSONPRCWSGIApplicationTestSuite(TestCase):
         response_json = responses_data[0]
         assert 'error' not in response_json
-        assert response_json['id'] == request2['id']
+        assert response_json['id'] == request1['id']
         assert response_json['result'] == 5
         response_json = responses_data[1]
old_path: tests/test_wsgi_application.py
rewrite: ReplaceText(target='request1' @(78,38)->(78,46))
initial_state: class JSONPRCWSGIApplicationTestSuite(TestCase): response_json = responses_data[0] assert 'error' not in response_json assert response_json['id'] == request2['id'] assert response_json['result'] == 5 response_json = responses_data[1]
final_state: class JSONPRCWSGIApplicationTestSuite(TestCase): response_json = responses_data[0] assert 'error' not in response_json assert response_json['id'] == request1['id'] assert response_json['result'] == 5 response_json = responses_data[1]
435
repo: https://:@github.com/MarSoft/ses-mailer-2.git
hash: 8c1b6aafc09412a6b6b2b1a69337ccbd99fc43f2
diff:
@@ -264,7 +264,7 @@ class Mail(object):
         for ob in optional_blocks:
             if ob in blocks:
                 if ob == "format" and \
-                        mail_params[ob].lower() not in ["html", "text"]:
+                        blocks[ob].lower() not in ["html", "text"]:
                     continue
                 mail_params[ob] = blocks[ob]
         return mail_params
old_path: ses_mailer.py
rewrite: ReplaceText(target='blocks' @(267,24)->(267,35))
initial_state: class Mail(object): for ob in optional_blocks: if ob in blocks: if ob == "format" and \ mail_params[ob].lower() not in ["html", "text"]: continue mail_params[ob] = blocks[ob] return mail_params
final_state: class Mail(object): for ob in optional_blocks: if ob in blocks: if ob == "format" and \ blocks[ob].lower() not in ["html", "text"]: continue mail_params[ob] = blocks[ob] return mail_params
436
repo: https://:@github.com/interpretml/interpret.git
hash: dfae1d47394d50472e25717c53c245cbe9f8a5ad
diff:
@@ -1067,7 +1067,7 @@ class BaseEBM(BaseEstimator):
                     "scores_range": bounds,
                 }
                 feature_list.append(feature_dict)
-                density_dict.append({})
+                density_list.append({})
                 data_dict = {
                     "type": "pairwise",
old_path: python/interpret/glassbox/ebm/ebm.py
rewrite: ReplaceText(target='density_list' @(1070,16)->(1070,28))
initial_state: class BaseEBM(BaseEstimator): "scores_range": bounds, } feature_list.append(feature_dict) density_dict.append({}) data_dict = { "type": "pairwise",
final_state: class BaseEBM(BaseEstimator): "scores_range": bounds, } feature_list.append(feature_dict) density_list.append({}) data_dict = { "type": "pairwise",
437
repo: https://:@gitlab.com/serial-lab/random-flavorpack.git
hash: e69ba47e04a84e1746363a93d33bfe2ca9581cd5
diff:
@@ -76,7 +76,7 @@ class RandomGenerator(Generator):
         if maximum <= minimum:
             raise ValueError(
                 _("The maximum can not be less than the minimum."))
-        if start < minimum or start >= maximum:
+        if start < minimum or start > maximum:
             raise ValueError(
                 _("The start must be between the minimum and maximum!"))
         rnrange = maximum - minimum
old_path: random_flavorpack/generators/random.py
rewrite: ReplaceText(target='>' @(79,36)->(79,38))
initial_state: class RandomGenerator(Generator): if maximum <= minimum: raise ValueError( _("The maximum can not be less than the minimum.")) if start < minimum or start >= maximum: raise ValueError( _("The start must be between the minimum and maximum!")) rnrange = maximum - minimum
final_state: class RandomGenerator(Generator): if maximum <= minimum: raise ValueError( _("The maximum can not be less than the minimum.")) if start < minimum or start > maximum: raise ValueError( _("The start must be between the minimum and maximum!")) rnrange = maximum - minimum
438
repo: https://:@github.com/Ezibenroc/PyRoaringBitMap.git
hash: 7081ceba18ccaf2ee80d3c142e6e612cf77d17d2
diff:
@@ -779,7 +779,7 @@ class OptimizationTest(unittest.TestCase):
         self.assertGreater(bm2.shrink_to_fit(), 0)
         self.assertEqual(bm2.shrink_to_fit(), 0)
         bm3 = cls(bm1, optimize=True)
-        self.assertEqual(bm2.shrink_to_fit(), 0)
+        self.assertEqual(bm3.shrink_to_fit(), 0)
 class VersionTest(unittest.TestCase):
old_path: test.py
rewrite: ReplaceText(target='bm3' @(782,25)->(782,28))
initial_state: class OptimizationTest(unittest.TestCase): self.assertGreater(bm2.shrink_to_fit(), 0) self.assertEqual(bm2.shrink_to_fit(), 0) bm3 = cls(bm1, optimize=True) self.assertEqual(bm2.shrink_to_fit(), 0) class VersionTest(unittest.TestCase):
final_state: class OptimizationTest(unittest.TestCase): self.assertGreater(bm2.shrink_to_fit(), 0) self.assertEqual(bm2.shrink_to_fit(), 0) bm3 = cls(bm1, optimize=True) self.assertEqual(bm3.shrink_to_fit(), 0) class VersionTest(unittest.TestCase):
439
repo: https://:@github.com/Cavenfish/autogamess.git
hash: 09def521ebf6c9686479439d71aee114d554a5de
diff:
@@ -136,8 +136,8 @@ def new_project(maindir, csvfile, ebasis_dir, initial_coords_dict=None,
     #Run Input Builder function
     save_dir = maindir + 'inputs/'
-    input_builder(csvfile, initial_coords_dict, ebasis_dir,
-                  save_dir, title.replace('/', '\n'))
+    input_builder(csvfile, save_dir, ebasis_dir,
+                  initial_coords_dict, title.replace('/', '\n'))
     return
old_path: autogamess/new_project.py
rewrite: ArgSwap(idxs=1<->3 @(139,4)->(139,17))
initial_state: def new_project(maindir, csvfile, ebasis_dir, initial_coords_dict=None, #Run Input Builder function save_dir = maindir + 'inputs/' input_builder(csvfile, initial_coords_dict, ebasis_dir, save_dir, title.replace('/', '\n')) return
final_state: def new_project(maindir, csvfile, ebasis_dir, initial_coords_dict=None, #Run Input Builder function save_dir = maindir + 'inputs/' input_builder(csvfile, save_dir, ebasis_dir, initial_coords_dict, title.replace('/', '\n')) return
440
repo: https://:@github.com/Cavenfish/autogamess.git
hash: 4dcbf5d1a0f9059f8bdbc1a346c8f9cced70f62d
diff:
@@ -136,7 +136,7 @@ def new_project(maindir, csvfile, ebasis_dir, initial_coords_dict=None,
     #Run Input Builder function
     save_dir = maindir + 'inputs/'
-    input_builder(csvfile, save_dir, ebasis_dir,
+    input_builder(csvfile, ebasis_dir, save_dir,
                   initial_coords_dict, title.replace('/', '\n'))
old_path: autogamess/new_project.py
rewrite: ArgSwap(idxs=1<->2 @(139,4)->(139,17))
initial_state: def new_project(maindir, csvfile, ebasis_dir, initial_coords_dict=None, #Run Input Builder function save_dir = maindir + 'inputs/' input_builder(csvfile, save_dir, ebasis_dir, initial_coords_dict, title.replace('/', '\n'))
final_state: def new_project(maindir, csvfile, ebasis_dir, initial_coords_dict=None, #Run Input Builder function save_dir = maindir + 'inputs/' input_builder(csvfile, ebasis_dir, save_dir, initial_coords_dict, title.replace('/', '\n'))
441
repo: https://:@github.com/Cavenfish/autogamess.git
hash: 3890ccfd3dc7e723a37b8b5308d59a8de0b6f807
diff:
@@ -306,6 +306,6 @@ def fill_spreadsheets(projdir=False, sorteddir=False, sheetsdir=False):
         if vsc in df:
             df[vsc].to_excel(writer, sheet_name=vsc, startrow=6)
         if cmp in df:
-            df[cmp].to_excel(writer, sheet_name=vsc, startrow=6)
+            df[cmp].to_excel(writer, sheet_name=cmp, startrow=6)
     return
old_path: autogamess/fill_spreadsheets.py
rewrite: ReplaceText(target='cmp' @(309,52)->(309,55))
initial_state: def fill_spreadsheets(projdir=False, sorteddir=False, sheetsdir=False): if vsc in df: df[vsc].to_excel(writer, sheet_name=vsc, startrow=6) if cmp in df: df[cmp].to_excel(writer, sheet_name=vsc, startrow=6) return
final_state: def fill_spreadsheets(projdir=False, sorteddir=False, sheetsdir=False): if vsc in df: df[vsc].to_excel(writer, sheet_name=vsc, startrow=6) if cmp in df: df[cmp].to_excel(writer, sheet_name=cmp, startrow=6) return
442
repo: https://:@github.com/EntilZha/ScalaFunctional.git
hash: 8426ff978b84cb4125052ad842ae5db64eaf42f3
diff:
@@ -185,6 +185,6 @@ class TestStreams(unittest.TestCase):
         # test insert into a connection
         with sqlite3.connect(tmp_path) as conn:
-            seq(elements).to_sqlite3(tmp_path, insert_sql)
+            seq(elements).to_sqlite3(conn, insert_sql)
             result = seq.sqlite3(conn, "SELECT id, name FROM user;").to_list()
             self.assertListEqual(elements, result)
old_path: functional/test/test_streams.py
rewrite: ReplaceText(target='conn' @(188,37)->(188,45))
initial_state: class TestStreams(unittest.TestCase): # test insert into a connection with sqlite3.connect(tmp_path) as conn: seq(elements).to_sqlite3(tmp_path, insert_sql) result = seq.sqlite3(conn, "SELECT id, name FROM user;").to_list() self.assertListEqual(elements, result)
final_state: class TestStreams(unittest.TestCase): # test insert into a connection with sqlite3.connect(tmp_path) as conn: seq(elements).to_sqlite3(conn, insert_sql) result = seq.sqlite3(conn, "SELECT id, name FROM user;").to_list() self.assertListEqual(elements, result)
443
repo: https://:@github.com/wesselb/stheno.git
hash: 32d55bf855f88067e689684eaa5d6f9e8c7604d6
diff:
@@ -80,7 +80,7 @@ class Kernel(Referentiable):
         def feat_map(x):
             scale = 2 * B.pi / B.cast(period, x.dtype)
             return B.concatenate((B.sin(x * scale),
-                                  B.cos(x * scale)), axis=0)
+                                  B.cos(x * scale)), axis=1)
         return Kernel(lambda x, y: self.f(feat_map(x), feat_map(y)))
old_path: stheno/kernel.py
rewrite: ReplaceText(target='1' @(83,59)->(83,60))
initial_state: class Kernel(Referentiable): def feat_map(x): scale = 2 * B.pi / B.cast(period, x.dtype) return B.concatenate((B.sin(x * scale), B.cos(x * scale)), axis=0) return Kernel(lambda x, y: self.f(feat_map(x), feat_map(y)))
final_state: class Kernel(Referentiable): def feat_map(x): scale = 2 * B.pi / B.cast(period, x.dtype) return B.concatenate((B.sin(x * scale), B.cos(x * scale)), axis=1) return Kernel(lambda x, y: self.f(feat_map(x), feat_map(y)))
444
repo: https://:@github.com/djgagne/hagelslag.git
hash: 7ef4f68645a7b7146f21813f2b39a0a7208b0fdb
diff:
@@ -19,7 +19,7 @@ class Watershed(object):
         self.max_intensity = max_intensity
     def label(self, data):
-        core_labels, n_labels = label(data <= self.max_intensity)
+        core_labels, n_labels = label(data >= self.max_intensity)
         ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity)
         return ws_labels
old_path: hagelslag/processing/Watershed.py
rewrite: ReplaceText(target='>=' @(22,43)->(22,45))
initial_state: class Watershed(object): self.max_intensity = max_intensity def label(self, data): core_labels, n_labels = label(data <= self.max_intensity) ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity) return ws_labels
final_state: class Watershed(object): self.max_intensity = max_intensity def label(self, data): core_labels, n_labels = label(data >= self.max_intensity) ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity) return ws_labels
445
repo: https://:@github.com/djgagne/hagelslag.git
hash: 28dbda86b4244802a1651a808dfcfe0dbdeb62e3
diff:
@@ -19,7 +19,7 @@ class Watershed(object):
         self.max_intensity = max_intensity
     def label(self, data):
-        core_labels, n_labels = label(data >= self.max_intensity)
+        core_labels, n_labels = label(data <= self.max_intensity)
         ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity)
         return ws_labels
old_path: hagelslag/processing/Watershed.py
rewrite: ReplaceText(target='<=' @(22,43)->(22,45))
initial_state: class Watershed(object): self.max_intensity = max_intensity def label(self, data): core_labels, n_labels = label(data >= self.max_intensity) ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity) return ws_labels
final_state: class Watershed(object): self.max_intensity = max_intensity def label(self, data): core_labels, n_labels = label(data <= self.max_intensity) ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity) return ws_labels
446
repo: https://:@github.com/djgagne/hagelslag.git
hash: be189c11c1135f782bb30529f58dff78e99f4c8e
diff:
@@ -19,7 +19,7 @@ class Watershed(object):
         self.max_intensity = max_intensity
     def label(self, data):
-        core_labels, n_labels = label(data <= self.max_intensity)
+        core_labels, n_labels = label(data >= self.max_intensity)
         ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity)
         return ws_labels
old_path: hagelslag/processing/Watershed.py
rewrite: ReplaceText(target='>=' @(22,43)->(22,45))
initial_state: class Watershed(object): self.max_intensity = max_intensity def label(self, data): core_labels, n_labels = label(data <= self.max_intensity) ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity) return ws_labels
final_state: class Watershed(object): self.max_intensity = max_intensity def label(self, data): core_labels, n_labels = label(data >= self.max_intensity) ws_labels = watershed(data.max() - data, markers=core_labels, mask=data >= self.min_intensity) return ws_labels
447
repo: https://:@github.com/ICRAR/daliuge.git
hash: 72b08c308f61bc4e9006976fc9e63f3638fad9e8
diff:
@@ -604,7 +604,7 @@ def chiles_pg():
     total_bandwidth = 480
     num_obs = 8 # the same as num of data island
     subband_width = 60 # MHz
-    num_subb = total_bandwidth / subband_width
+    num_subb = total_bandwidth // subband_width
     subband_dict = collections.defaultdict(list) # for corner turning
     img_list = []
     start_freq = 940
old_path: test/graphsRepository.py
rewrite: ReplaceText(target='//' @(607,31)->(607,32))
initial_state: def chiles_pg(): total_bandwidth = 480 num_obs = 8 # the same as num of data island subband_width = 60 # MHz num_subb = total_bandwidth / subband_width subband_dict = collections.defaultdict(list) # for corner turning img_list = [] start_freq = 940
final_state: def chiles_pg(): total_bandwidth = 480 num_obs = 8 # the same as num of data island subband_width = 60 # MHz num_subb = total_bandwidth // subband_width subband_dict = collections.defaultdict(list) # for corner turning img_list = [] start_freq = 940
448
repo: https://:@github.com/ICRAR/daliuge.git
hash: 00eb7a92f6679df09650e2e8054e9163f0089785
diff:
@@ -115,7 +115,7 @@ class TestDM(unittest.TestCase):
         a.setCompleted()
         for dm, drop in (dm1,a), (dm2,b), (dm2,c):
-            self.assertEqual(DROPStates.COMPLETED, dm.get_drop_property(sessionId, 'status', drop.uid))
+            self.assertEqual(DROPStates.COMPLETED, dm.get_drop_property(sessionId, drop.uid, 'status'))
         self.assertEqual(a.checksum, int(droputils.allDropContents(c)))
         for dropProxy in a,b,c:
old_path: test/manager/test_dm.py
rewrite: ArgSwap(idxs=1<->2 @(118,51)->(118,71))
initial_state: class TestDM(unittest.TestCase): a.setCompleted() for dm, drop in (dm1,a), (dm2,b), (dm2,c): self.assertEqual(DROPStates.COMPLETED, dm.get_drop_property(sessionId, 'status', drop.uid)) self.assertEqual(a.checksum, int(droputils.allDropContents(c))) for dropProxy in a,b,c:
final_state: class TestDM(unittest.TestCase): a.setCompleted() for dm, drop in (dm1,a), (dm2,b), (dm2,c): self.assertEqual(DROPStates.COMPLETED, dm.get_drop_property(sessionId, drop.uid, 'status')) self.assertEqual(a.checksum, int(droputils.allDropContents(c))) for dropProxy in a,b,c:
449
repo: https://:@github.com/ICRAR/daliuge.git
hash: a45bf6f0b7e2fa2627b7e4faa18324aa1087d8f5
diff:
@@ -167,7 +167,7 @@ class DockerTests(unittest.TestCase):
         c = FileDROP('c', 'c')
         b.addInput(a)
         b.addOutput(c)
-        with DROPWaiterCtx(self, b, 100):
+        with DROPWaiterCtx(self, c, 100):
             a.setCompleted()
         self.assertEqual(six.b(a.dataURL), droputils.allDropContents(c))
old_path: test/apps/test_docker.py
rewrite: ReplaceText(target='c' @(170,33)->(170,34))
initial_state: class DockerTests(unittest.TestCase): c = FileDROP('c', 'c') b.addInput(a) b.addOutput(c) with DROPWaiterCtx(self, b, 100): a.setCompleted() self.assertEqual(six.b(a.dataURL), droputils.allDropContents(c))
final_state: class DockerTests(unittest.TestCase): c = FileDROP('c', 'c') b.addInput(a) b.addOutput(c) with DROPWaiterCtx(self, c, 100): a.setCompleted() self.assertEqual(six.b(a.dataURL), droputils.allDropContents(c))
450
repo: https://:@github.com/ICRAR/daliuge.git
hash: 882b2feb9672662c5347bf0b11ce06b0e7529be8
diff:
@@ -512,7 +512,7 @@ class LogParser(object):
         for dim_log_f in possible_logs:
             if (os.path.exists(dim_log_f)):
                 self._dim_log_f = [dim_log_f]
-                if (dim_log_f == possible_logs[1]):
+                if (dim_log_f == possible_logs[0]):
                     cluster_log = os.path.join(log_dir, '0', 'start_dlg_cluster.log')
                     if (os.path.exists(cluster_log)):
                         self._dim_log_f.append(cluster_log)
old_path: dlg/deploy/pawsey/scale_test.py
rewrite: ReplaceText(target='0' @(515,47)->(515,48))
initial_state: class LogParser(object): for dim_log_f in possible_logs: if (os.path.exists(dim_log_f)): self._dim_log_f = [dim_log_f] if (dim_log_f == possible_logs[1]): cluster_log = os.path.join(log_dir, '0', 'start_dlg_cluster.log') if (os.path.exists(cluster_log)): self._dim_log_f.append(cluster_log)
final_state: class LogParser(object): for dim_log_f in possible_logs: if (os.path.exists(dim_log_f)): self._dim_log_f = [dim_log_f] if (dim_log_f == possible_logs[0]): cluster_log = os.path.join(log_dir, '0', 'start_dlg_cluster.log') if (os.path.exists(cluster_log)): self._dim_log_f.append(cluster_log)
451
repo: https://:@github.com/ICRAR/daliuge.git
hash: f1204971537d6fa5e972cd96c963f907166dd291
diff:
@@ -952,7 +952,7 @@ class KFamilyPartition(Partition):
         kwargs['weight'] = self_global_dag.node[u].get('weight', 5)
         self._dag.add_node(u, **kwargs)
         for k in self._w_attr:
-            self._tmp_max_dop[_w_attr] = get_max_weighted_antichain(self._dag, w_attr=k)[0]
+            self._tmp_max_dop[k] = get_max_weighted_antichain(self._dag, w_attr=k)[0]
         self._max_dop = self._tmp_max_dop
     def can_merge(self, that, u, v):
old_path: dlg/dropmake/scheduler.py
rewrite: ReplaceText(target='k' @(955,30)->(955,37))
initial_state: class KFamilyPartition(Partition): kwargs['weight'] = self_global_dag.node[u].get('weight', 5) self._dag.add_node(u, **kwargs) for k in self._w_attr: self._tmp_max_dop[_w_attr] = get_max_weighted_antichain(self._dag, w_attr=k)[0] self._max_dop = self._tmp_max_dop def can_merge(self, that, u, v):
final_state: class KFamilyPartition(Partition): kwargs['weight'] = self_global_dag.node[u].get('weight', 5) self._dag.add_node(u, **kwargs) for k in self._w_attr: self._tmp_max_dop[k] = get_max_weighted_antichain(self._dag, w_attr=k)[0] self._max_dop = self._tmp_max_dop def can_merge(self, that, u, v):
452
repo: https://:@github.com/ICRAR/daliuge.git
hash: 6a91d4338a9a90bc2413e4e78c9ed8ca02264ae4
diff:
@@ -2225,7 +2225,7 @@ def partition(pgt, algo, num_partitions=1, num_islands=1,
     elif algo == ALGO_MIN_NUM_PARTS:
         time_greedy = 1 - time_greedy / 100.0 # assuming between 1 to 100
-        pgt = MinNumPartsPGTP(pgt, deadline, num_partitions, partition_label, max_dop, merge_parts=could_merge, optimistic_factor=time_greedy)
+        pgt = MinNumPartsPGTP(pgt, deadline, num_partitions, partition_label, max_cpu, merge_parts=could_merge, optimistic_factor=time_greedy)
     elif algo == ALGO_PSO:
         pgt = PSOPGTP(pgt, partition_label, max_dop, deadline=deadline, topk=topk, swarm_size=swarm_size, merge_parts=could_merge)
old_path: dlg/dropmake/pg_generator.py
rewrite: ReplaceText(target='max_cpu' @(2228,79)->(2228,86))
initial_state: def partition(pgt, algo, num_partitions=1, num_islands=1, elif algo == ALGO_MIN_NUM_PARTS: time_greedy = 1 - time_greedy / 100.0 # assuming between 1 to 100 pgt = MinNumPartsPGTP(pgt, deadline, num_partitions, partition_label, max_dop, merge_parts=could_merge, optimistic_factor=time_greedy) elif algo == ALGO_PSO: pgt = PSOPGTP(pgt, partition_label, max_dop, deadline=deadline, topk=topk, swarm_size=swarm_size, merge_parts=could_merge)
final_state: def partition(pgt, algo, num_partitions=1, num_islands=1, elif algo == ALGO_MIN_NUM_PARTS: time_greedy = 1 - time_greedy / 100.0 # assuming between 1 to 100 pgt = MinNumPartsPGTP(pgt, deadline, num_partitions, partition_label, max_cpu, merge_parts=could_merge, optimistic_factor=time_greedy) elif algo == ALGO_PSO: pgt = PSOPGTP(pgt, partition_label, max_dop, deadline=deadline, topk=topk, swarm_size=swarm_size, merge_parts=could_merge)
453
repo: https://:@github.com/ICRAR/daliuge.git
hash: 885ea31e59129d694329161da7acf7e8f2654348
diff:
@@ -96,7 +96,7 @@ def check_hosts(ips, port, timeout=None, check_with_session=False, retry=1):
                 logger.info("Host %s:%d is running", ip, port)
                 return ip
             logger.warning("Failed to contact host %s:%d", ip, port)
-            ntries -= 0
+            ntries -= 1
     return None
 # Don't return None values
old_path: dlg/deploy/pawsey/start_dfms_cluster.py
rewrite: ReplaceText(target='1' @(99,22)->(99,23))
initial_state: def check_hosts(ips, port, timeout=None, check_with_session=False, retry=1): logger.info("Host %s:%d is running", ip, port) return ip logger.warning("Failed to contact host %s:%d", ip, port) ntries -= 0 return None # Don't return None values
final_state: def check_hosts(ips, port, timeout=None, check_with_session=False, retry=1): logger.info("Host %s:%d is running", ip, port) return ip logger.warning("Failed to contact host %s:%d", ip, port) ntries -= 1 return None # Don't return None values
454
repo: https://:@github.com/ICRAR/daliuge.git
hash: ca615527deef8c147aaad3c64755b5f3d89b65b8
diff:
@@ -56,7 +56,7 @@ def timed_import(module_name):
     """Imports `module_name` and log how long it took to import it"""
     start = time.time()
     module = importlib.import_module(module_name)
-    logger.info('Imported %s in %.3f seconds', module, time.time() - start)
+    logger.info('Imported %s in %.3f seconds', module_name, time.time() - start)
     return module
old_path: dlg/utils.py
rewrite: ReplaceText(target='module_name' @(59,47)->(59,53))
initial_state: def timed_import(module_name): """Imports `module_name` and log how long it took to import it""" start = time.time() module = importlib.import_module(module_name) logger.info('Imported %s in %.3f seconds', module, time.time() - start) return module
final_state: def timed_import(module_name): """Imports `module_name` and log how long it took to import it""" start = time.time() module = importlib.import_module(module_name) logger.info('Imported %s in %.3f seconds', module_name, time.time() - start) return module
455
https://:@github.com/JRCSTU/co2mpas-ta.git
8e8557c3590acc6942bcabc7167de3767681e48b
@@ -419,7 +419,7 @@ def define_alternator_status_model( if soc < dn_soc or (prev_status == 1 and soc < up_soc): status = 1 - elif has_energy_recuperation and gear_box_power_in >= 0: + elif has_energy_recuperation and gear_box_power_in < 0: status = 2 return status
co2mpas/functions/physical/electrics/__init__.py
ReplaceText(target='<' @(422,63)->(422,65))
def define_alternator_status_model( if soc < dn_soc or (prev_status == 1 and soc < up_soc): status = 1 elif has_energy_recuperation and gear_box_power_in >= 0: status = 2 return status
def define_alternator_status_model( if soc < dn_soc or (prev_status == 1 and soc < up_soc): status = 1 elif has_energy_recuperation and gear_box_power_in < 0: status = 2 return status
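The `>=` to `<` flip above encodes the physics: regenerative charging (status 2) is only available when gear-box power is negative, i.e. the wheels are back-driving the engine while braking. Below is a condensed paraphrase of the decision logic from the diff, with made-up state-of-charge thresholds:

```python
def alternator_status(soc, prev_status, gear_box_power_in,
                      dn_soc=0.3, up_soc=0.9, has_energy_recuperation=True):
    if soc < dn_soc or (prev_status == 1 and soc < up_soc):
        return 1  # charge because the battery is low
    if has_energy_recuperation and gear_box_power_in < 0:
        return 2  # regenerative charging: only when power flows *from* the wheels
    return 0

print(alternator_status(0.8, 0, -5.0))  # 2 -- braking, recuperate
print(alternator_status(0.8, 0, 12.0))  # 0 -- engine is driving, no recuperation
```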
456
https://:@github.com/JRCSTU/co2mpas-ta.git
b155780da4cd2489da6c06da12e0c4df41534ab5
@@ -2766,8 +2766,8 @@ class Dispatcher(object): elif node_id in dists: # The node w already estimated. if dist < dists[node_id]: # Error for negative paths. - raise DispatcherError('Contradictory paths found: ' - 'negative weights?', self) + raise DispatcherError(self, 'Contradictory paths found: ' + 'negative weights?') elif node_id not in seen or dist < seen[node_id]: # Check min dist. seen[node_id] = dist # Update dist.
co2mpas/dispatcher/__init__.py
ArgSwap(idxs=0<->1 @(2769,22)->(2769,37))
class Dispatcher(object): elif node_id in dists: # The node w already estimated. if dist < dists[node_id]: # Error for negative paths. raise DispatcherError('Contradictory paths found: ' 'negative weights?', self) elif node_id not in seen or dist < seen[node_id]: # Check min dist. seen[node_id] = dist # Update dist.
class Dispatcher(object): elif node_id in dists: # The node w already estimated. if dist < dists[node_id]: # Error for negative paths. raise DispatcherError(self, 'Contradictory paths found: ' 'negative weights?') elif node_id not in seen or dist < seen[node_id]: # Check min dist. seen[node_id] = dist # Update dist.
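The ArgSwap above reorders positional arguments to an exception constructor. Python performs no type checking on positional arguments, so the swapped call raises nothing by itself; it just stores the message where the dispatcher should go and vice versa. A sketch of the failure mode (this `DispatcherError` signature is an assumption for illustration, not the actual co2mpas class):

```python
class DispatcherError(Exception):
    def __init__(self, dsp, message):
        super().__init__(message)
        self.dsp = dsp          # the dispatcher being reported on
        self.message = message

dsp = object()  # stand-in for a Dispatcher instance

bad = DispatcherError('Contradictory paths found: negative weights?', dsp)
print(type(bad.dsp).__name__)   # 'str'    -- message landed in the wrong slot
good = DispatcherError(dsp, 'Contradictory paths found: negative weights?')
print(type(good.dsp).__name__)  # 'object' -- as intended
```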
457
https://:@github.com/JRCSTU/co2mpas-ta.git
46830a3edd1490d499b8f0e788ce87efe873d264
@@ -225,7 +225,7 @@ def _predict_electrics( alternator_current = calculate_alternator_current( alternator_status, on_engine, gear_box_power_in, alternator_current_model, engine_start_current, - prev_battery_current, acceleration) + battery_state_of_charge, acceleration) battery_current = calculate_battery_current( electric_load, alternator_current, alternator_nominal_voltage,
co2mpas/functions/co2mpas_model/physical/electrics/electrics_prediction.py
ReplaceText(target='battery_state_of_charge' @(228,8)->(228,28))
def _predict_electrics( alternator_current = calculate_alternator_current( alternator_status, on_engine, gear_box_power_in, alternator_current_model, engine_start_current, prev_battery_current, acceleration) battery_current = calculate_battery_current( electric_load, alternator_current, alternator_nominal_voltage,
def _predict_electrics( alternator_current = calculate_alternator_current( alternator_status, on_engine, gear_box_power_in, alternator_current_model, engine_start_current, battery_state_of_charge, acceleration) battery_current = calculate_battery_current( electric_load, alternator_current, alternator_nominal_voltage,
458
https://:@github.com/JRCSTU/co2mpas-ta.git
45e4f0782888c84b9c99842db88457353b45efb3
@@ -240,7 +240,7 @@ def define_data_schema(read=True): 'f0_uncorrected': positive, 'f2': positive, 'f0': positive, - 'correct_f0': positive, + 'correct_f0': _bool, 'co2_emission_low': positive, 'co2_emission_medium': positive,
co2mpas/functions/io/schema.py
ReplaceText(target='_bool' @(243,22)->(243,30))
def define_data_schema(read=True): 'f0_uncorrected': positive, 'f2': positive, 'f0': positive, 'correct_f0': positive, 'co2_emission_low': positive, 'co2_emission_medium': positive,
def define_data_schema(read=True): 'f0_uncorrected': positive, 'f2': positive, 'f0': positive, 'correct_f0': _bool, 'co2_emission_low': positive, 'co2_emission_medium': positive,
459
https://:@github.com/JRCSTU/co2mpas-ta.git
3fcd6ce4395980ea879bde8f1270e390e750a8ee
@@ -3020,7 +3020,7 @@ class Dispatcher(object): self._meet[dsp_id] = initial_dist # Set view distance. # Check if inputs are satisfied. - if self.check_wait_in(node['wait_inputs'], node_id): + if self.check_wait_in(node['wait_inputs'], dsp_id): return False # Pass the node if dsp_id not in distances:
co2mpas/dispatcher/__init__.py
ReplaceText(target='dsp_id' @(3023,51)->(3023,58))
class Dispatcher(object): self._meet[dsp_id] = initial_dist # Set view distance. # Check if inputs are satisfied. if self.check_wait_in(node['wait_inputs'], node_id): return False # Pass the node if dsp_id not in distances:
class Dispatcher(object): self._meet[dsp_id] = initial_dist # Set view distance. # Check if inputs are satisfied. if self.check_wait_in(node['wait_inputs'], dsp_id): return False # Pass the node if dsp_id not in distances:
460
https://:@github.com/JRCSTU/co2mpas-ta.git
e3346285e51b0bba0d909746146e0be70c3090eb
@@ -87,7 +87,7 @@ def calculate_full_load(full_load_speeds, full_load_powers, idle_engine_speed): """ pn = np.array((full_load_speeds, full_load_powers)) - max_speed_at_max_power, max_power = pn[:, np.argmax(pn[0])] + max_speed_at_max_power, max_power = pn[:, np.argmax(pn[1])] pn[1] /= max_power idle = idle_engine_speed[0] pn[0] = (pn[0] - idle) / (max_speed_at_max_power - idle)
co2mpas/model/physical/engine/__init__.py
ReplaceText(target='1' @(90,59)->(90,60))
def calculate_full_load(full_load_speeds, full_load_powers, idle_engine_speed): """ pn = np.array((full_load_speeds, full_load_powers)) max_speed_at_max_power, max_power = pn[:, np.argmax(pn[0])] pn[1] /= max_power idle = idle_engine_speed[0] pn[0] = (pn[0] - idle) / (max_speed_at_max_power - idle)
def calculate_full_load(full_load_speeds, full_load_powers, idle_engine_speed): """ pn = np.array((full_load_speeds, full_load_powers)) max_speed_at_max_power, max_power = pn[:, np.argmax(pn[1])] pn[1] /= max_power idle = idle_engine_speed[0] pn[0] = (pn[0] - idle) / (max_speed_at_max_power - idle)
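In the full-load fix above, `pn` stacks speeds in row 0 and powers in row 1, so the column of interest is the one where *power* peaks: `np.argmax(pn[1])`. Using `np.argmax(pn[0])` just returns the index of the highest speed. A small sketch with made-up numbers:

```python
import numpy as np

full_load_speeds = np.array([1000.0, 2500.0, 4000.0, 5500.0, 6500.0])
full_load_powers = np.array([20.0, 60.0, 95.0, 100.0, 90.0])

pn = np.array((full_load_speeds, full_load_powers))

wrong = pn[:, np.argmax(pn[0])]  # argmax over speeds: always the last column here
right = pn[:, np.argmax(pn[1])]  # argmax over powers: the speed at peak power

print(wrong)  # [6500.   90.]
print(right)  # [5500.  100.]
```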
461
https://:@github.com/JRCSTU/co2mpas-ta.git
4154efcd8980a1790f2675afa14803991b4da76e
@@ -1223,7 +1223,7 @@ def calibrate_co2_params( p = restrict_bounds(p) - p, s = calibrate_model_params(co2_error_function_on_phases, p) + p, s = calibrate_model_params(co2_error_function_on_emissions, p) success.append((s, copy.deepcopy(p))) _set_attr(p, vary)
co2mpas/model/physical/engine/co2_emission.py
ReplaceText(target='co2_error_function_on_emissions' @(1226,34)->(1226,62))
def calibrate_co2_params( p = restrict_bounds(p) p, s = calibrate_model_params(co2_error_function_on_phases, p) success.append((s, copy.deepcopy(p))) _set_attr(p, vary)
def calibrate_co2_params( p = restrict_bounds(p) p, s = calibrate_model_params(co2_error_function_on_emissions, p) success.append((s, copy.deepcopy(p))) _set_attr(p, vary)
462
https://:@github.com/JRCSTU/co2mpas-ta.git
4c077512de9127f377b3802d2c82fe8ebd56f5c2
@@ -703,7 +703,7 @@ def define_data_schema(read=True): 'alternator_powers_demand': np_array, 'alternator_statuses': np_array_int, 'auxiliaries_power_losses': np_array, - 'auxiliaries_torque_loss': positive, + 'auxiliaries_torque_loss': tuplefloat, 'auxiliaries_torque_losses': np_array, 'battery_currents': np_array, 'clutch_tc_powers': np_array,
co2mpas/io/schema.py
ReplaceText(target='tuplefloat' @(706,35)->(706,43))
def define_data_schema(read=True): 'alternator_powers_demand': np_array, 'alternator_statuses': np_array_int, 'auxiliaries_power_losses': np_array, 'auxiliaries_torque_loss': positive, 'auxiliaries_torque_losses': np_array, 'battery_currents': np_array, 'clutch_tc_powers': np_array,
def define_data_schema(read=True): 'alternator_powers_demand': np_array, 'alternator_statuses': np_array_int, 'auxiliaries_power_losses': np_array, 'auxiliaries_torque_loss': tuplefloat, 'auxiliaries_torque_losses': np_array, 'battery_currents': np_array, 'clutch_tc_powers': np_array,
463
https://:@github.com/JRCSTU/co2mpas-ta.git
274f898a173aa42185fa5ef138035b4cb5994d28
@@ -74,7 +74,7 @@ class TestGearBox(unittest.TestCase): def test_calculate_torque_out(self): wp, es, gbs = self.wp, self.es, self.ws self.assertEquals( - list(calculate_gear_box_torques(wp, es, gbs, 10)), list(self.tgb) + list(calculate_gear_box_torques(wp, gbs, es, 10)), list(self.tgb) ) @unittest.skip("to be reviewed")
tests/functions/test_gear_box.py
ArgSwap(idxs=1<->2 @(77,17)->(77,43))
class TestGearBox(unittest.TestCase): def test_calculate_torque_out(self): wp, es, gbs = self.wp, self.es, self.ws self.assertEquals( list(calculate_gear_box_torques(wp, es, gbs, 10)), list(self.tgb) ) @unittest.skip("to be reviewed")
class TestGearBox(unittest.TestCase): def test_calculate_torque_out(self): wp, es, gbs = self.wp, self.es, self.ws self.assertEquals( list(calculate_gear_box_torques(wp, gbs, es, 10)), list(self.tgb) ) @unittest.skip("to be reviewed")
464
https://:@github.com/JRCSTU/co2mpas-ta.git
739964622f68661a4dc35b8a60a30db5cb8475b2
@@ -2608,7 +2608,7 @@ class Co2guiCmd(cmdlets.Cmd): progr_bar.grid(column=1, row=1, sticky='nswe') if step is not None: - if step < 0: + if step <= 0: progr_var.set(-step) else: progr_var.set(progr_var.get() + step)
co2mpas/co2gui/__init__.py
ReplaceText(target='<=' @(2611,20)->(2611,21))
class Co2guiCmd(cmdlets.Cmd): progr_bar.grid(column=1, row=1, sticky='nswe') if step is not None: if step < 0: progr_var.set(-step) else: progr_var.set(progr_var.get() + step)
class Co2guiCmd(cmdlets.Cmd): progr_bar.grid(column=1, row=1, sticky='nswe') if step is not None: if step <= 0: progr_var.set(-step) else: progr_var.set(progr_var.get() + step)
465
https://:@github.com/JRCSTU/co2mpas-ta.git
18af1fede3536121930c99ea0a2c94e8ffeb3bf7
@@ -450,6 +450,6 @@ def calculate_drive_battery_currents_v2( n_p, n_s = drive_battery_n_parallel_cells, drive_battery_n_series_cells p = drive_battery_electric_powers r0, ocv = drive_battery_r0, drive_battery_ocv - x = ocv + np.nan_to_num(np.sqrt(ocv ** 2 - (4e3 * r0 / (n_s * n_p)) * p)) + x = ocv - np.nan_to_num(np.sqrt(ocv ** 2 - (4e3 * r0 / (n_s * n_p)) * p)) x *= n_p / (2 * r0) return x
co2mpas/core/model/physical/electrics/batteries/drive.py
ReplaceText(target='-' @(453,12)->(453,13))
def calculate_drive_battery_currents_v2( n_p, n_s = drive_battery_n_parallel_cells, drive_battery_n_series_cells p = drive_battery_electric_powers r0, ocv = drive_battery_r0, drive_battery_ocv x = ocv + np.nan_to_num(np.sqrt(ocv ** 2 - (4e3 * r0 / (n_s * n_p)) * p)) x *= n_p / (2 * r0) return x
def calculate_drive_battery_currents_v2( n_p, n_s = drive_battery_n_parallel_cells, drive_battery_n_series_cells p = drive_battery_electric_powers r0, ocv = drive_battery_r0, drive_battery_ocv x = ocv - np.nan_to_num(np.sqrt(ocv ** 2 - (4e3 * r0 / (n_s * n_p)) * p)) x *= n_p / (2 * r0) return x
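The `+` to `-` change above selects the physical root of a quadratic. With open-circuit voltage `ocv` and internal resistance `r0`, delivering power p means p = i*(ocv - i*r0), i.e. r0*i**2 - ocv*i + p = 0; the '-' branch gives i -> 0 as p -> 0, while the '+' branch diverges toward the short-circuit current ocv/r0. A single-battery sketch with invented parameters (the record's 4e3 unit factor and the n_s/n_p cell counts are omitted):

```python
import numpy as np

def battery_current(p, ocv=350.0, r0=0.05):
    """Current [A] drawn from a battery delivering electric power p [W].

    Solves p = i * (ocv - i * r0). The '-' root is the physical one.
    """
    disc = np.sqrt(ocv**2 - 4.0 * r0 * p)
    return (ocv - disc) / (2.0 * r0)

print(battery_current(0.0))      # 0.0 -- no power, no current
print(battery_current(10_000.0)) # ~28.7 A; the '+' root would give ~6971 A
```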
466
https://:@github.com/JRCSTU/co2mpas-ta.git
8f4cfb4afa97fc43a95b62b497f182fd72b0e379
@@ -263,7 +263,7 @@ def calculate_service_battery_loads( Service battery load vector [kW]. :rtype: numpy.array """ - p = service_battery_electric_powers - service_battery_electric_powers_supply + p = service_battery_electric_powers + service_battery_electric_powers_supply return p
co2mpas/core/model/physical/electrics/batteries/service/__init__.py
ReplaceText(target='+' @(266,40)->(266,41))
def calculate_service_battery_loads( Service battery load vector [kW]. :rtype: numpy.array """ p = service_battery_electric_powers - service_battery_electric_powers_supply return p
def calculate_service_battery_loads( Service battery load vector [kW]. :rtype: numpy.array """ p = service_battery_electric_powers + service_battery_electric_powers_supply return p
467
https://:@github.com/JRCSTU/co2mpas-ta.git
52695d79acea053f5ff38a8a5d223f1907d33fb8
@@ -46,7 +46,7 @@ def calculate_final_drive_ratios(final_drive_ratio, n_gears=1): # noinspection PyUnusedLocal,PyMissingOrEmptyDocstring def is_not_manual_or_automatic(gear_box_type, *args): - return gear_box_type in ('manual', 'automatic') + return gear_box_type not in ('manual', 'automatic') dsp.add_function(
co2mpas/core/model/physical/final_drive.py
ReplaceText(target=' not in ' @(49,24)->(49,28))
def calculate_final_drive_ratios(final_drive_ratio, n_gears=1): # noinspection PyUnusedLocal,PyMissingOrEmptyDocstring def is_not_manual_or_automatic(gear_box_type, *args): return gear_box_type in ('manual', 'automatic') dsp.add_function(
def calculate_final_drive_ratios(final_drive_ratio, n_gears=1): # noinspection PyUnusedLocal,PyMissingOrEmptyDocstring def is_not_manual_or_automatic(gear_box_type, *args): return gear_box_type not in ('manual', 'automatic') dsp.add_function(
468
https://:@github.com/JRCSTU/co2mpas-ta.git
401ec07523c5b34dacab9e73cf9a417dada880cb
@@ -429,7 +429,7 @@ class CorrectGear: # 3.2 j = i + np.searchsorted(times[i:], times[i] + 1) - if not gear and up_clip(velocities, j + 1) >= up_clip(velocities, j): + if not gear and up_clip(velocities, j + 1) > up_clip(velocities, j): gear = self.min_gear return gear
co2mpas/core/model/physical/gear_box/at_gear/__init__.py
ReplaceText(target='>' @(432,51)->(432,53))
class CorrectGear: # 3.2 j = i + np.searchsorted(times[i:], times[i] + 1) if not gear and up_clip(velocities, j + 1) >= up_clip(velocities, j): gear = self.min_gear return gear
class CorrectGear: # 3.2 j = i + np.searchsorted(times[i:], times[i] + 1) if not gear and up_clip(velocities, j + 1) > up_clip(velocities, j): gear = self.min_gear return gear
469
https://:@github.com/JRCSTU/co2mpas-ta.git
745c3623fffca5cf7f84358f8fff87287a35525c
@@ -125,7 +125,7 @@ def define_tau_function(after_treatment_temperature_threshold): f = sci_sta.lognorm(max(s, dfl.EPS), 0, temp_mean).cdf def _tau_function(t0, t1, temp): - return t0 - (t1 - t0) * f(temp + 273) + return t0 + (t1 - t0) * f(temp + 273) return _tau_function
co2mpas/core/model/physical/engine/fc.py
ReplaceText(target='+' @(128,18)->(128,19))
def define_tau_function(after_treatment_temperature_threshold): f = sci_sta.lognorm(max(s, dfl.EPS), 0, temp_mean).cdf def _tau_function(t0, t1, temp): return t0 - (t1 - t0) * f(temp + 273) return _tau_function
def define_tau_function(after_treatment_temperature_threshold): f = sci_sta.lognorm(max(s, dfl.EPS), 0, temp_mean).cdf def _tau_function(t0, t1, temp): return t0 + (t1 - t0) * f(temp + 273) return _tau_function
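The fix above corrects an interpolation: `f(temp + 273)` is a lognormal CDF in [0, 1], so `t0 + (t1 - t0) * f(...)` blends from t0 (cold) to t1 (hot), whereas the minus sign would drive the result away from t1 entirely. A minimal sketch of the corrected form (the lognormal parameters are made up):

```python
import scipy.stats as sci_sta

def define_tau_function(temp_mean=350.0, s=0.2):
    f = sci_sta.lognorm(s, 0, temp_mean).cdf

    def _tau_function(t0, t1, temp):
        # f(...) runs 0 -> 1 with temperature, so this blends t0 -> t1.
        return t0 + (t1 - t0) * f(temp + 273)

    return _tau_function

tau = define_tau_function()
print(tau(1.0, 3.0, -200))  # ~1.0: far below the threshold, stays near t0
print(tau(1.0, 3.0, 500))   # ~3.0: far above, approaches t1
```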
470
https://:@github.com/RasaHQ/rasa.git
57d15923475ec8b5361cd6f84f327d864253746a
@@ -33,7 +33,7 @@ def run_train(_config): def load_interpreter_for_model(nlp, config, persisted_path): metadata = DataRouter.read_model_metadata(persisted_path, config) - return DataRouter.create_interpreter(nlp, metadata) + return DataRouter.create_interpreter(metadata, nlp) class ResponseTest(object):
_pytest/utilities.py
ArgSwap(idxs=0<->1 @(36,11)->(36,40))
def run_train(_config): def load_interpreter_for_model(nlp, config, persisted_path): metadata = DataRouter.read_model_metadata(persisted_path, config) return DataRouter.create_interpreter(nlp, metadata) class ResponseTest(object):
def run_train(_config): def load_interpreter_for_model(nlp, config, persisted_path): metadata = DataRouter.read_model_metadata(persisted_path, config) return DataRouter.create_interpreter(metadata, nlp) class ResponseTest(object):
471
https://:@github.com/RasaHQ/rasa.git
bb0b24e56a97affbfa3db91be71782f293e9e5c2
@@ -29,7 +29,7 @@ def test_luis_data_without_tokenizer(): def test_wit_data(): td = load_data('data/examples/wit/demo-flights.json', "en") assert td.entity_examples != [] - assert td.intent_examples != [] + assert td.intent_examples == [] assert td.entity_synonyms == {}
_pytest/test_training_data.py
ReplaceText(target='==' @(32,30)->(32,32))
def test_luis_data_without_tokenizer(): def test_wit_data(): td = load_data('data/examples/wit/demo-flights.json', "en") assert td.entity_examples != [] assert td.intent_examples != [] assert td.entity_synonyms == {}
def test_luis_data_without_tokenizer(): def test_wit_data(): td = load_data('data/examples/wit/demo-flights.json', "en") assert td.entity_examples != [] assert td.intent_examples == [] assert td.entity_synonyms == {}
472
https://:@github.com/RasaHQ/rasa.git
12484270fb8c3c271d74c5b3269f287eaf2d7cfb
@@ -84,7 +84,7 @@ class SklearnIntentClassifier(Component): # dirty str fix because sklearn is expecting str not instance of basestr... tuned_parameters = [{'C': [1, 2, 5, 10, 20, 100], 'kernel': [str('linear')]}] - cv_splits = max(2, min(MAX_CV_FOLDS, np.min(np.bincount(y)) / 5)) # aim for at least 5 examples in each fold + cv_splits = max(2, min(MAX_CV_FOLDS, np.min(np.bincount(y)) // 5)) # aim for at least 5 examples in each fold self.clf = GridSearchCV(SVC(C=1, probability=True), param_grid=tuned_parameters, n_jobs=num_threads,
rasa_nlu/classifiers/sklearn_intent_classifier.py
ReplaceText(target='//' @(87,68)->(87,69))
class SklearnIntentClassifier(Component): # dirty str fix because sklearn is expecting str not instance of basestr... tuned_parameters = [{'C': [1, 2, 5, 10, 20, 100], 'kernel': [str('linear')]}] cv_splits = max(2, min(MAX_CV_FOLDS, np.min(np.bincount(y)) / 5)) # aim for at least 5 examples in each fold self.clf = GridSearchCV(SVC(C=1, probability=True), param_grid=tuned_parameters, n_jobs=num_threads,
class SklearnIntentClassifier(Component): # dirty str fix because sklearn is expecting str not instance of basestr... tuned_parameters = [{'C': [1, 2, 5, 10, 20, 100], 'kernel': [str('linear')]}] cv_splits = max(2, min(MAX_CV_FOLDS, np.min(np.bincount(y)) // 5)) # aim for at least 5 examples in each fold self.clf = GridSearchCV(SVC(C=1, probability=True), param_grid=tuned_parameters, n_jobs=num_threads,
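The `/` to `//` fix above matters because a fold count must be an integer: under Python 3 true division, `np.min(np.bincount(y)) / 5` yields a float, which is not a valid `cv=` argument for grid search. A quick demonstration of the arithmetic:

```python
import numpy as np

MAX_CV_FOLDS = 5
y = np.repeat([0, 1], [12, 20])  # 12 examples of class 0, 20 of class 1

smallest = np.min(np.bincount(y))                # 12: size of the rarest class
print(max(2, min(MAX_CV_FOLDS, smallest / 5)))   # 2.4 -- a float, invalid fold count
print(max(2, min(MAX_CV_FOLDS, smallest // 5)))  # 2   -- an int, as required
```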
473
https://:@github.com/RasaHQ/rasa.git
b5f9b6ad06ff52d75522b49c745f3e83c32ee7cd
@@ -135,7 +135,7 @@ class TrainingData(object): logger.info("Training data stats: \n" + "\t- intent examples: {} ({} distinct intents)\n".format( self.num_intent_examples, len(different_intents)) + - "\t- found intents: {}\n".format(list_to_str(different_entities)) + + "\t- found intents: {}\n".format(list_to_str(different_intents)) + "\t- entity examples: {} ({} distinct entities)\n".format( self.num_entity_examples, len(different_entities)) + "\t- found entities: {}\n".format(list_to_str(different_entities)))
rasa_nlu/training_data.py
ReplaceText(target='different_intents' @(138,65)->(138,83))
class TrainingData(object): logger.info("Training data stats: \n" + "\t- intent examples: {} ({} distinct intents)\n".format( self.num_intent_examples, len(different_intents)) + "\t- found intents: {}\n".format(list_to_str(different_entities)) + "\t- entity examples: {} ({} distinct entities)\n".format( self.num_entity_examples, len(different_entities)) + "\t- found entities: {}\n".format(list_to_str(different_entities)))
class TrainingData(object): logger.info("Training data stats: \n" + "\t- intent examples: {} ({} distinct intents)\n".format( self.num_intent_examples, len(different_intents)) + "\t- found intents: {}\n".format(list_to_str(different_intents)) + "\t- entity examples: {} ({} distinct entities)\n".format( self.num_entity_examples, len(different_entities)) + "\t- found entities: {}\n".format(list_to_str(different_entities)))
474
https://:@github.com/RasaHQ/rasa.git
c5e10bd1c504146064183b6b87cfdf80d8617a4d
@@ -28,7 +28,7 @@ class MarkdownToRasa: entities = [] utter = example_in_md for regex in [ent_regex, ent_regex_with_value]: - utter = re.sub(regex, r"\1", example_in_md) # [text](entity) -> text + utter = re.sub(regex, r"\1", utter) # [text](entity) -> text ent_matches = re.finditer(regex, example_in_md) for matchNum, match in enumerate(ent_matches): if 'synonym' in match.groupdict():
rasa_nlu/utils/md_to_rasa.py
ReplaceText(target='utter' @(31,41)->(31,54))
class MarkdownToRasa: entities = [] utter = example_in_md for regex in [ent_regex, ent_regex_with_value]: utter = re.sub(regex, r"\1", example_in_md) # [text](entity) -> text ent_matches = re.finditer(regex, example_in_md) for matchNum, match in enumerate(ent_matches): if 'synonym' in match.groupdict():
class MarkdownToRasa: entities = [] utter = example_in_md for regex in [ent_regex, ent_regex_with_value]: utter = re.sub(regex, r"\1", utter) # [text](entity) -> text ent_matches = re.finditer(regex, example_in_md) for matchNum, match in enumerate(ent_matches): if 'synonym' in match.groupdict():
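The markdown fix above is the classic lost-accumulator bug: each `re.sub` pass must operate on the result of the previous one, or the second regex is applied to the original string and the first pass is silently discarded. A standalone sketch with simplified patterns (these regexes are illustrative, not the rasa ones):

```python
import re

ent_regex = re.compile(r"\[([^\]]+)\]\(\w+\)")               # [text](entity)
ent_regex_with_value = re.compile(r"\[([^\]]+)\]\{[^}]+\}")  # [text]{...}

example = 'fly from [Berlin](city) to [Amsterdam]{"entity": "city"}'

# Buggy: always substitutes on the original, so only the last regex "wins".
utter_bug = example
for regex in (ent_regex, ent_regex_with_value):
    utter_bug = regex.sub(r"\1", example)

# Fixed: thread the accumulator through every pass.
utter = example
for regex in (ent_regex, ent_regex_with_value):
    utter = regex.sub(r"\1", utter)

print(utter_bug)  # 'fly from [Berlin](city) to Amsterdam' -- first pass lost
print(utter)      # 'fly from Berlin to Amsterdam'
```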
475
https://:@github.com/RasaHQ/rasa.git
571c59bc9fccca87aa1c29b56ddf874fce9b111a
@@ -87,9 +87,9 @@ class Metadata(object): return [] def for_component(self, name, defaults=None): - return config.component_config_from_pipeline(self.get('pipeline', []), - name, - defaults) + return config.component_config_from_pipeline(name, + self.get('pipeline', []), + defaults) @property def language(self):
rasa_nlu/model.py
ArgSwap(idxs=0<->1 @(90,15)->(90,52))
class Metadata(object): return [] def for_component(self, name, defaults=None): return config.component_config_from_pipeline(self.get('pipeline', []), name, defaults) @property def language(self):
class Metadata(object): return [] def for_component(self, name, defaults=None): return config.component_config_from_pipeline(name, self.get('pipeline', []), defaults) @property def language(self):
476
https://:@github.com/RasaHQ/rasa.git
9a06d81201ca84812540bc4128111c55e22ffca7
@@ -156,7 +156,7 @@ class RasaNLUModelConfig(object): return json_to_string(self.__dict__, indent=4) def for_component(self, name, defaults=None): - return component_config_from_pipeline(self.pipeline, name, defaults) + return component_config_from_pipeline(name, self.pipeline, defaults) @property def component_names(self):
rasa_nlu/config.py
ArgSwap(idxs=0<->1 @(159,15)->(159,45))
class RasaNLUModelConfig(object): return json_to_string(self.__dict__, indent=4) def for_component(self, name, defaults=None): return component_config_from_pipeline(self.pipeline, name, defaults) @property def component_names(self):
class RasaNLUModelConfig(object): return json_to_string(self.__dict__, indent=4) def for_component(self, name, defaults=None): return component_config_from_pipeline(name, self.pipeline, defaults) @property def component_names(self):
477
https://:@github.com/RasaHQ/rasa.git
2d92ef4002144ec4b3d66bd911c26642e5ab698f
@@ -42,7 +42,7 @@ def create_argument_parser(): description='evaluates a dialogue model') parent_parser = argparse.ArgumentParser(add_help=False) add_args_to_parser(parent_parser) - cli.arguments.add_model_and_story_group(parser, + cli.arguments.add_model_and_story_group(parent_parser, allow_pretrained_model=False) utils.add_logging_option_arguments(parent_parser) subparsers = parser.add_subparsers(help='mode', dest='mode')
rasa_core/evaluate.py
ReplaceText(target='parent_parser' @(45,44)->(45,50))
def create_argument_parser(): description='evaluates a dialogue model') parent_parser = argparse.ArgumentParser(add_help=False) add_args_to_parser(parent_parser) cli.arguments.add_model_and_story_group(parser, allow_pretrained_model=False) utils.add_logging_option_arguments(parent_parser) subparsers = parser.add_subparsers(help='mode', dest='mode')
def create_argument_parser(): description='evaluates a dialogue model') parent_parser = argparse.ArgumentParser(add_help=False) add_args_to_parser(parent_parser) cli.arguments.add_model_and_story_group(parent_parser, allow_pretrained_model=False) utils.add_logging_option_arguments(parent_parser) subparsers = parser.add_subparsers(help='mode', dest='mode')
478
https://:@github.com/RasaHQ/rasa.git
665cf94ee30f44aac85e8ac3c5ed8b2b0694354a
@@ -49,7 +49,7 @@ def create_argument_parser(): description='evaluates a dialogue model') parent_parser = argparse.ArgumentParser(add_help=False) add_args_to_parser(parent_parser) - cli.arguments.add_model_and_story_group(parser, + cli.arguments.add_model_and_story_group(parent_parser, allow_pretrained_model=False) utils.add_logging_option_arguments(parent_parser) subparsers = parser.add_subparsers(help='mode', dest='mode')
rasa_core/evaluate.py
ReplaceText(target='parent_parser' @(52,44)->(52,50))
def create_argument_parser(): description='evaluates a dialogue model') parent_parser = argparse.ArgumentParser(add_help=False) add_args_to_parser(parent_parser) cli.arguments.add_model_and_story_group(parser, allow_pretrained_model=False) utils.add_logging_option_arguments(parent_parser) subparsers = parser.add_subparsers(help='mode', dest='mode')
def create_argument_parser(): description='evaluates a dialogue model') parent_parser = argparse.ArgumentParser(add_help=False) add_args_to_parser(parent_parser) cli.arguments.add_model_and_story_group(parent_parser, allow_pretrained_model=False) utils.add_logging_option_arguments(parent_parser) subparsers = parser.add_subparsers(help='mode', dest='mode')
479
https://:@github.com/RasaHQ/rasa.git
37f446c8246c78339727f7b09bd2021906ec8d60
@@ -174,7 +174,7 @@ def test_generate_training_data_original_and_augmented_trackers( hasattr(t, 'is_augmented') or not t.is_augmented ] assert len(original_trackers) == 3 - assert len(original_trackers) <= 33 + assert len(training_trackers) <= 33 def test_visualize_training_data_graph(tmpdir, default_domain):
tests/test_dsl.py
ReplaceText(target='training_trackers' @(177,15)->(177,32))
def test_generate_training_data_original_and_augmented_trackers( hasattr(t, 'is_augmented') or not t.is_augmented ] assert len(original_trackers) == 3 assert len(original_trackers) <= 33 def test_visualize_training_data_graph(tmpdir, default_domain):
def test_generate_training_data_original_and_augmented_trackers( hasattr(t, 'is_augmented') or not t.is_augmented ] assert len(original_trackers) == 3 assert len(training_trackers) <= 33 def test_visualize_training_data_graph(tmpdir, default_domain):
480
https://:@github.com/RasaHQ/rasa.git
31ab3bb5d10a09f8957455909311257b257f44dc
@@ -218,7 +218,7 @@ class TestMemoizationPolicy(PolicyTestCollection): assert recalled == default_domain.index_for_action(actions[0]) for tracker, states, actions \ - in zip(trackers, all_states_augmented, all_actions_augmented): + in zip(augmented_trackers, all_states_augmented, all_actions_augmented): recalled = trained_policy.recall(states, tracker, default_domain) assert recalled == 0
tests/test_policies.py
ReplaceText(target='augmented_trackers' @(221,23)->(221,31))
class TestMemoizationPolicy(PolicyTestCollection): assert recalled == default_domain.index_for_action(actions[0]) for tracker, states, actions \ in zip(trackers, all_states_augmented, all_actions_augmented): recalled = trained_policy.recall(states, tracker, default_domain) assert recalled == 0
class TestMemoizationPolicy(PolicyTestCollection): assert recalled == default_domain.index_for_action(actions[0]) for tracker, states, actions \ in zip(augmented_trackers, all_states_augmented, all_actions_augmented): recalled = trained_policy.recall(states, tracker, default_domain) assert recalled == 0
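The test fix above pairs each augmented tracker with its states. Notably, `zip` would never have flagged the mismatch: it silently truncates to the shortest iterable, which is exactly why this kind of wrong-list bug passes unnoticed. A tiny illustration:

```python
trackers = ["t1", "t2", "t3"]                        # 3 original trackers
augmented_trackers = ["t1", "t2", "t3", "a1", "a2"]  # 5 after augmentation
all_states_augmented = [f"states_{i}" for i in range(5)]

# Buggy pairing: zip stops after 3 pairs; the augmented ones are never checked.
print(len(list(zip(trackers, all_states_augmented))))            # 3
# Correct pairing covers every augmented tracker.
print(len(list(zip(augmented_trackers, all_states_augmented))))  # 5
```

From Python 3.10 on, `zip(..., strict=True)` turns this silent truncation into a `ValueError`, which would have surfaced the bug immediately.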
481
https://:@github.com/RasaHQ/rasa.git
90a98e954b209a05a168e86e78d4ad90e12d8869
@@ -31,7 +31,7 @@ def run(model: Text, endpoints: Text, connector: Text = None, from rasa_core.utils import AvailableEndpoints model_path = get_model(model) - core_path, nlu_path = get_model_subdirectories(model) + core_path, nlu_path = get_model_subdirectories(model_path) _endpoints = AvailableEndpoints.read_endpoints(endpoints) if not connector and not credentials:
rasa/run.py
ReplaceText(target='model_path' @(34,51)->(34,56))
def run(model: Text, endpoints: Text, connector: Text = None, from rasa_core.utils import AvailableEndpoints model_path = get_model(model) core_path, nlu_path = get_model_subdirectories(model) _endpoints = AvailableEndpoints.read_endpoints(endpoints) if not connector and not credentials:
def run(model: Text, endpoints: Text, connector: Text = None, from rasa_core.utils import AvailableEndpoints model_path = get_model(model) core_path, nlu_path = get_model_subdirectories(model_path) _endpoints = AvailableEndpoints.read_endpoints(endpoints) if not connector and not credentials:
482
https://:@github.com/RasaHQ/rasa.git
e43636652c80d0e9d81dc01f6a83ebfc5444ee12
@@ -69,8 +69,8 @@ def test_core( if os.path.exists(core_path) and os.path.exists(nlu_path): _interpreter = NaturalLanguageInterpreter.create(nlu_path, _endpoints.nlu) - _agent = Agent.load(core_path, interpreter=_interpreter) - + _agent = Agent.load(model_path, interpreter=_interpreter) + kwargs = minimal_kwargs(kwargs, rasa.core.test, ["stories", "agent"]) loop.run_until_complete(
rasa/test.py
ReplaceText(target='model_path' @(72,28)->(72,37))
def test_core( if os.path.exists(core_path) and os.path.exists(nlu_path): _interpreter = NaturalLanguageInterpreter.create(nlu_path, _endpoints.nlu) _agent = Agent.load(core_path, interpreter=_interpreter) kwargs = minimal_kwargs(kwargs, rasa.core.test, ["stories", "agent"]) loop.run_until_complete(
def test_core( if os.path.exists(core_path) and os.path.exists(nlu_path): _interpreter = NaturalLanguageInterpreter.create(nlu_path, _endpoints.nlu) _agent = Agent.load(model_path, interpreter=_interpreter) kwargs = minimal_kwargs(kwargs, rasa.core.test, ["stories", "agent"]) loop.run_until_complete(
483
https://:@github.com/RasaHQ/rasa.git
2d74a8355b63f587c6e1a7d69027a84982fe237d
@@ -118,7 +118,7 @@ async def train_comparison_models( file_importer, train_path, policy_config=policy_config, - exclusion_percentage=current_run, + exclusion_percentage=percentage, kwargs=kwargs, dump_stories=dump_stories, )
rasa/core/train.py
ReplaceText(target='percentage' @(121,45)->(121,56))
async def train_comparison_models( file_importer, train_path, policy_config=policy_config, exclusion_percentage=current_run, kwargs=kwargs, dump_stories=dump_stories, )
async def train_comparison_models( file_importer, train_path, policy_config=policy_config, exclusion_percentage=percentage, kwargs=kwargs, dump_stories=dump_stories, )
484
https://:@github.com/RasaHQ/rasa.git
3b51563dc49830f4e5f9a09ebd823c5f7eb563ef
@@ -29,7 +29,7 @@ class Tokenizer(Component): if "use_cls_token" in self.component_config: self.use_cls_token = self.component_config["use_cls_token"] else: - self.use_cls_token = False + self.use_cls_token = True def add_cls_token( self, tokens: List[Token], attribute: Text = MESSAGE_TEXT_ATTRIBUTE
rasa/nlu/tokenizers/tokenizer.py
ReplaceText(target='True' @(32,33)->(32,38))
class Tokenizer(Component): if "use_cls_token" in self.component_config: self.use_cls_token = self.component_config["use_cls_token"] else: self.use_cls_token = False def add_cls_token( self, tokens: List[Token], attribute: Text = MESSAGE_TEXT_ATTRIBUTE
class Tokenizer(Component): if "use_cls_token" in self.component_config: self.use_cls_token = self.component_config["use_cls_token"] else: self.use_cls_token = True def add_cls_token( self, tokens: List[Token], attribute: Text = MESSAGE_TEXT_ATTRIBUTE
485
https://:@github.com/RasaHQ/rasa.git
f96eb791fb2236695a98fd5a4f935c3c5d316fe3
@@ -376,7 +376,7 @@ def test_intent_evaluation_report_large(tmpdir_factory): assert len(report.keys()) == 8 assert report["A"] == a_results - assert result["E"] == e_results + assert report["E"] == e_results def test_response_evaluation_report(tmpdir_factory):
tests/nlu/base/test_evaluation.py
ReplaceText(target='report' @(379,11)->(379,17))
def test_intent_evaluation_report_large(tmpdir_factory): assert len(report.keys()) == 8 assert report["A"] == a_results assert result["E"] == e_results def test_response_evaluation_report(tmpdir_factory):
def test_intent_evaluation_report_large(tmpdir_factory): assert len(report.keys()) == 8 assert report["A"] == a_results assert report["E"] == e_results def test_response_evaluation_report(tmpdir_factory):
486
https://:@github.com/RasaHQ/rasa.git
ff9bb32d79e484cd2cfd7cde0acfa9d0006e14f8
@@ -527,7 +527,7 @@ class DotProductLoss(tf.keras.layers.Layer): tiled = tf.tile(tf.expand_dims(x, 0), (batch_size, 1, 1)) - return tf.gather(tiled, idxs, batch_dims=-1) + return tf.gather(tiled, idxs, batch_dims=1) def _get_bad_mask( self, labels: "tf.Tensor", target_labels: "tf.Tensor", idxs: "tf.Tensor"
rasa/utils/tf_layers.py
ReplaceText(target='1' @(530,49)->(530,51))
class DotProductLoss(tf.keras.layers.Layer): tiled = tf.tile(tf.expand_dims(x, 0), (batch_size, 1, 1)) return tf.gather(tiled, idxs, batch_dims=-1) def _get_bad_mask( self, labels: "tf.Tensor", target_labels: "tf.Tensor", idxs: "tf.Tensor"
class DotProductLoss(tf.keras.layers.Layer): tiled = tf.tile(tf.expand_dims(x, 0), (batch_size, 1, 1)) return tf.gather(tiled, idxs, batch_dims=1) def _get_bad_mask( self, labels: "tf.Tensor", target_labels: "tf.Tensor", idxs: "tf.Tensor"
487
https://:@github.com/RasaHQ/rasa.git
75e04b5ec3cb1ac6925152f491db074a391a9378
@@ -67,7 +67,7 @@ class SpacyFeaturizer(Featurizer): non_zero_features = np.array([f for f in features if f.any()]) if self.pooling_operation == "mean": - return np.mean(features, axis=0, keepdims=True) + return np.mean(non_zero_features, axis=0, keepdims=True) elif self.pooling_operation == "max": return np.max(features, axis=0, keepdims=True) else:
rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py
ReplaceText(target='non_zero_features' @(70,27)->(70,35))
class SpacyFeaturizer(Featurizer): non_zero_features = np.array([f for f in features if f.any()]) if self.pooling_operation == "mean": return np.mean(features, axis=0, keepdims=True) elif self.pooling_operation == "max": return np.max(features, axis=0, keepdims=True) else:
class SpacyFeaturizer(Featurizer): non_zero_features = np.array([f for f in features if f.any()]) if self.pooling_operation == "mean": return np.mean(non_zero_features, axis=0, keepdims=True) elif self.pooling_operation == "max": return np.max(features, axis=0, keepdims=True) else:
488
https://:@github.com/RasaHQ/rasa.git
ba4b8f70ffbb7bb4d3429c3b6aaa1f9fbcc3f632
@@ -69,7 +69,7 @@ class SpacyFeaturizer(Featurizer): if self.pooling_operation == "mean": return np.mean(non_zero_features, axis=0, keepdims=True) elif self.pooling_operation == "max": - return np.max(features, axis=0, keepdims=True) + return np.max(non_zero_features, axis=0, keepdims=True) else: raise ValueError( f"Invalid pooling operation specified. Available operations are "
rasa/nlu/featurizers/dense_featurizer/spacy_featurizer.py
ReplaceText(target='non_zero_features' @(72,26)->(72,34))
class SpacyFeaturizer(Featurizer): if self.pooling_operation == "mean": return np.mean(non_zero_features, axis=0, keepdims=True) elif self.pooling_operation == "max": return np.max(features, axis=0, keepdims=True) else: raise ValueError( f"Invalid pooling operation specified. Available operations are "
class SpacyFeaturizer(Featurizer): if self.pooling_operation == "mean": return np.mean(non_zero_features, axis=0, keepdims=True) elif self.pooling_operation == "max": return np.max(non_zero_features, axis=0, keepdims=True) else: raise ValueError( f"Invalid pooling operation specified. Available operations are "
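The two spacy_featurizer fixes above complete the same intent: pool only over token vectors that are non-zero (spaCy returns all-zero vectors for out-of-vocabulary tokens), for mean and max pooling alike. A sketch of the corrected behaviour with toy vectors:

```python
import numpy as np

# Per-token vectors; the second token is OOV and came back all-zero.
features = np.array([
    [0.4, 0.8],
    [0.0, 0.0],
    [0.2, 0.6],
])

non_zero_features = np.array([f for f in features if f.any()])

print(np.mean(features, axis=0, keepdims=True))           # ~[[0.2 0.467]] -- dragged down by the OOV row
print(np.mean(non_zero_features, axis=0, keepdims=True))  # [[0.3 0.7]]
# Max pooling happens to match here because all components are positive,
# but with negative components a zero row would corrupt the max as well.
print(np.max(non_zero_features, axis=0, keepdims=True))   # [[0.4 0.8]]
```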
489
https://:@github.com/RasaHQ/rasa.git
a2552e73fc8e4a656f43796101121ff8963bc0da
@@ -50,7 +50,7 @@ class EntityExtractor(Component): # get indices of entity labels that belong to one word for idx in range(1, len(entities)): if entities[idx]["start"] == entities[idx - 1]["end"]: - if entity_indices and entity_indices[-1][1] == idx - 1: + if entity_indices and entity_indices[-1][-1] == idx - 1: entity_indices[-1].append(idx) else: entity_indices.append([idx - 1, idx])
rasa/nlu/extractors/extractor.py
ReplaceText(target='-1' @(53,57)->(53,58))
class EntityExtractor(Component): # get indices of entity labels that belong to one word for idx in range(1, len(entities)): if entities[idx]["start"] == entities[idx - 1]["end"]: if entity_indices and entity_indices[-1][1] == idx - 1: entity_indices[-1].append(idx) else: entity_indices.append([idx - 1, idx])
class EntityExtractor(Component): # get indices of entity labels that belong to one word for idx in range(1, len(entities)): if entities[idx]["start"] == entities[idx - 1]["end"]: if entity_indices and entity_indices[-1][-1] == idx - 1: entity_indices[-1].append(idx) else: entity_indices.append([idx - 1, idx])
490
https://:@github.com/RasaHQ/rasa.git
16ac7259b842e188c5c012bcaf6303e4bf4a4602
@@ -223,7 +223,7 @@ class RasaModel(tf.keras.models.Model): self.save(self.best_model_file, overwrite=True) if best_model_epoch >= 0: - logger.info(f'The model of epoch {epoch} (out of {epochs} in total) will be stored!') + logger.info(f'The model of epoch {best_model_epoch} (out of {epochs} in total) will be stored!') if self.model_summary_file is not None: self._write_model_summary()
rasa/utils/tensorflow/models.py
ReplaceText(target='best_model_epoch' @(226,46)->(226,51))
class RasaModel(tf.keras.models.Model): self.save(self.best_model_file, overwrite=True) if best_model_epoch >= 0: logger.info(f'The model of epoch {epoch} (out of {epochs} in total) will be stored!') if self.model_summary_file is not None: self._write_model_summary()
class RasaModel(tf.keras.models.Model): self.save(self.best_model_file, overwrite=True) if best_model_epoch >= 0: logger.info(f'The model of epoch {best_model_epoch} (out of {epochs} in total) will be stored!') if self.model_summary_file is not None: self._write_model_summary()
491
https://:@github.com/RasaHQ/rasa.git
194820a60d61607fc480a95d981cb570e9ec3d4f
@@ -254,7 +254,7 @@ class RasaModel(tf.keras.models.Model): val_results = self._get_metric_results(prefix="val_") if self._does_model_improve(val_results): logger.debug(f"Creating model checkpoint after training...") - best_model_epoch = epoch + best_model_epoch = epochs self.save(self.best_model_file, overwrite=True) if best_model_epoch >= 0:
rasa/utils/tensorflow/models.py
ReplaceText(target='epochs' @(257,35)->(257,40))
class RasaModel(tf.keras.models.Model): val_results = self._get_metric_results(prefix="val_") if self._does_model_improve(val_results): logger.debug(f"Creating model checkpoint after training...") best_model_epoch = epoch self.save(self.best_model_file, overwrite=True) if best_model_epoch >= 0:
class RasaModel(tf.keras.models.Model): val_results = self._get_metric_results(prefix="val_") if self._does_model_improve(val_results): logger.debug(f"Creating model checkpoint after training...") best_model_epoch = epochs self.save(self.best_model_file, overwrite=True) if best_model_epoch >= 0:
492
https://:@github.com/gbrammer/grizli.git
a14c6aef2fb7790a5418ac882e332cbddf3771d9
@@ -87,7 +87,7 @@ def run_all(id, t0=None, t1=None, fwhm=1200, zr=[0.65, 1.6], dz=[0.004, 0.0002], if scale_photometry: scl = mb.scale_to_photometry(z=fit.meta['z_map'][0], method='lm', templates=t0, order=scale_photometry*1) - if scl.status == 0: + if scl.status > 0: mb.pscale = scl.x st.pscale = scl.x
grizli/fitting.py
ReplaceText(target='>' @(90,22)->(90,24))
def run_all(id, t0=None, t1=None, fwhm=1200, zr=[0.65, 1.6], dz=[0.004, 0.0002], if scale_photometry: scl = mb.scale_to_photometry(z=fit.meta['z_map'][0], method='lm', templates=t0, order=scale_photometry*1) if scl.status == 0: mb.pscale = scl.x st.pscale = scl.x
def run_all(id, t0=None, t1=None, fwhm=1200, zr=[0.65, 1.6], dz=[0.004, 0.0002], if scale_photometry: scl = mb.scale_to_photometry(z=fit.meta['z_map'][0], method='lm', templates=t0, order=scale_photometry*1) if scl.status > 0: mb.pscale = scl.x st.pscale = scl.x
493
https://:@github.com/gbrammer/grizli.git
06b2bb1a51cc4090eb57f37798a4b6cb6b24b2c2
@@ -2291,7 +2291,7 @@ For example, # Pixel area map pam = os.path.join(os.getenv('iref'), 'ir_wfc3_map.fits') print('Pixel area map: {0}'.format(pam)) - if not os.path.exists(badpix): + if not os.path.exists(pam): os.system('curl -o {0} http://www.stsci.edu/hst/wfc3/pam/ir_wfc3_map.fits'.format(pam)) def fetch_config_files(ACS=False):
grizli/utils.py
ReplaceText(target='pam' @(2294,26)->(2294,32))
For example, # Pixel area map pam = os.path.join(os.getenv('iref'), 'ir_wfc3_map.fits') print('Pixel area map: {0}'.format(pam)) if not os.path.exists(badpix): os.system('curl -o {0} http://www.stsci.edu/hst/wfc3/pam/ir_wfc3_map.fits'.format(pam)) def fetch_config_files(ACS=False):
For example, # Pixel area map pam = os.path.join(os.getenv('iref'), 'ir_wfc3_map.fits') print('Pixel area map: {0}'.format(pam)) if not os.path.exists(pam): os.system('curl -o {0} http://www.stsci.edu/hst/wfc3/pam/ir_wfc3_map.fits'.format(pam)) def fetch_config_files(ACS=False):
494
https://:@github.com/gbrammer/grizli.git
60c15addafcb4ac4a0e55bc697b4bb18d6463736
@@ -229,7 +229,7 @@ def go(root='j010311+131615', maglim=[17,26], HOME_PATH='/Volumes/Pegasus/Grizli ir_ref = None auto_script.drizzle_overlaps(root, filters=optical_filters, - make_combined=(ir_ref is not None), ref_image=ir_ref) + make_combined=(ir_ref is None), ref_image=ir_ref) if ir_ref is None: # Need
grizli/pipeline/auto_script.py
ReplaceText(target=' is ' @(232,33)->(232,41))
def go(root='j010311+131615', maglim=[17,26], HOME_PATH='/Volumes/Pegasus/Grizli ir_ref = None auto_script.drizzle_overlaps(root, filters=optical_filters, make_combined=(ir_ref is not None), ref_image=ir_ref) if ir_ref is None: # Need
def go(root='j010311+131615', maglim=[17,26], HOME_PATH='/Volumes/Pegasus/Grizli ir_ref = None auto_script.drizzle_overlaps(root, filters=optical_filters, make_combined=(ir_ref is None), ref_image=ir_ref) if ir_ref is None: # Need
495
https://:@github.com/gbrammer/grizli.git
e32899e470d02fa98365ca1ab1bfc70d6b64077b
@@ -3420,7 +3420,7 @@ def field_rgb(root='j010514+021532', xsize=6, output_dpi=None, HOME_PATH='./', s PATH_TO = '{0}/{1}/Prep'.format(HOME_PATH, root) else: PATH_TO = './' - sci_files = glob.glob('./{1}-f*sci.fits'.format(HOME_PATH, root)) + sci_files = glob.glob('./{1}-f*sci.fits'.format(PATH_TO, root)) if filters is None: filters = [file.split('_')[-3].split('-')[-1] for file in sci_files]
grizli/pipeline/auto_script.py
ReplaceText(target='PATH_TO' @(3423,56)->(3423,65))
def field_rgb(root='j010514+021532', xsize=6, output_dpi=None, HOME_PATH='./', s PATH_TO = '{0}/{1}/Prep'.format(HOME_PATH, root) else: PATH_TO = './' sci_files = glob.glob('./{1}-f*sci.fits'.format(HOME_PATH, root)) if filters is None: filters = [file.split('_')[-3].split('-')[-1] for file in sci_files]
def field_rgb(root='j010514+021532', xsize=6, output_dpi=None, HOME_PATH='./', s PATH_TO = '{0}/{1}/Prep'.format(HOME_PATH, root) else: PATH_TO = './' sci_files = glob.glob('./{1}-f*sci.fits'.format(PATH_TO, root)) if filters is None: filters = [file.split('_')[-3].split('-')[-1] for file in sci_files]
496
https://:@github.com/gbrammer/grizli.git
27974fdbe2c948dee6777f6c6d333b46e1456a80
@@ -575,7 +575,7 @@ class GroupFLT(): is_cgs=False): """TBD """ - if cpu_count == 0: + if cpu_count <= 0: cpu_count = mp.cpu_count() if fit_info is None:
grizli/multifit.py
ReplaceText(target='<=' @(578,21)->(578,23))
class GroupFLT(): is_cgs=False): """TBD """ if cpu_count == 0: cpu_count = mp.cpu_count() if fit_info is None:
class GroupFLT(): is_cgs=False): """TBD """ if cpu_count <= 0: cpu_count = mp.cpu_count() if fit_info is None:
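The `==` to `<=` fix above widens the auto-detect branch to cover negative values too, a common convention (compare joblib-style `n_jobs`) where any non-positive count means "use all cores". A minimal sketch:

```python
import multiprocessing as mp

def resolve_cpu_count(cpu_count=0):
    # Treat 0 *and* negative values as "auto": fall back to all cores.
    if cpu_count <= 0:
        cpu_count = mp.cpu_count()
    return cpu_count

print(resolve_cpu_count(0))   # number of cores on this machine
print(resolve_cpu_count(-1))  # also auto; the old '== 0' check kept -1 as-is
print(resolve_cpu_count(4))   # 4
```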
497
https://:@github.com/gbrammer/grizli.git
73b7211978b46ce1f7bcc1de111237c047fbe00e
@@ -959,7 +959,7 @@ def parse_visits(field_root='', HOME_PATH='./', use_visit=True, combine_same_pa= elif (combine_minexp > 0) & (not has_grism): combined = [] for visit in visits: - if len(visit['files']) > combine_minexp*1: + if len(visit['files']) >= combine_minexp*1: combined.append(copy.deepcopy(visit)) else: filter_pa = '-'.join(visit['product'].split('-')[-2:])
grizli/pipeline/auto_script.py
ReplaceText(target='>=' @(962,35)->(962,36))
def parse_visits(field_root='', HOME_PATH='./', use_visit=True, combine_same_pa= elif (combine_minexp > 0) & (not has_grism): combined = [] for visit in visits: if len(visit['files']) > combine_minexp*1: combined.append(copy.deepcopy(visit)) else: filter_pa = '-'.join(visit['product'].split('-')[-2:])
def parse_visits(field_root='', HOME_PATH='./', use_visit=True, combine_same_pa= elif (combine_minexp > 0) & (not has_grism): combined = [] for visit in visits: if len(visit['files']) >= combine_minexp*1: combined.append(copy.deepcopy(visit)) else: filter_pa = '-'.join(visit['product'].split('-')[-2:])
498
https://:@github.com/gbrammer/grizli.git
54178395a55d79f53bf11d651b53bb6bc3448eb6
@@ -3386,7 +3386,7 @@ def make_filter_combinations(root, weight_fnu=True, filter_combinations=FILTER_C # UVIS if filt_i.startswith('f') & filt_i.endswith('u'): - filt_i = filt_i[:1] + filt_i = filt_i[:-1] band = None for f in filter_combinations:
grizli/pipeline/auto_script.py
ReplaceText(target='-1' @(3389,29)->(3389,30))
def make_filter_combinations(root, weight_fnu=True, filter_combinations=FILTER_C # UVIS if filt_i.startswith('f') & filt_i.endswith('u'): filt_i = filt_i[:1] band = None for f in filter_combinations:
def make_filter_combinations(root, weight_fnu=True, filter_combinations=FILTER_C # UVIS if filt_i.startswith('f') & filt_i.endswith('u'): filt_i = filt_i[:-1] band = None for f in filter_combinations:
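The slice fix above: `filt_i[:-1]` drops the trailing 'u' that marks a UVIS filter name, whereas the buggy `filt_i[:1]` keeps only the first character. A one-liner to make the difference concrete (the filter name is illustrative):

```python
filt_i = "f336wu"   # UVIS-style filter name ending in 'u'
print(filt_i[:1])   # 'f'      -- buggy: almost the whole name is lost
print(filt_i[:-1])  # 'f336w'  -- fixed: only the UVIS suffix is removed
```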
499
https://:@github.com/PmagPy/PmagPy.git
153e127b39023f35ad6724b0e89609849ec48689
@@ -33,7 +33,7 @@ def main(): try: fh_last = open(last_path, 'r+') last_checked = pickle.load(fh_last) - if last_checked > time.time() - 24*60*60: + if last_checked < time.time() - 24*60*60: return # stop here because it's been less than 24 hours else: pickle.dump(time.time(), fh_last)
check_updates.py
ReplaceText(target='<' @(36,24)->(36,25))
def main(): try: fh_last = open(last_path, 'r+') last_checked = pickle.load(fh_last) if last_checked > time.time() - 24*60*60: return # stop here because it's been less than 24 hours else: pickle.dump(time.time(), fh_last)
def main(): try: fh_last = open(last_path, 'r+') last_checked = pickle.load(fh_last) if last_checked < time.time() - 24*60*60: return # stop here because it's been less than 24 hours else: pickle.dump(time.time(), fh_last)