Columns (min/max are value ranges for the int64 column and string-length ranges for string columns):

column          type    min   max
Unnamed: 0      int64   0     2.44k
repo            string  32    81
hash            string  40    40
diff            string  113   1.17k
old_path        string  5     84
rewrite         string  34    79
initial_state   string  75    980
final_state     string  76    980
100
https://:@github.com/allenai/SciSpaCy.git
d0d1d525943e051762ea5482ba82c9a4718285c0
diff:
@@ -58,7 +58,7 @@ def train_parser_and_tagger(train_json_path: str,
     parser = nlp.get_pipe('parser')
     train_corpus = GoldCorpus(train_json_path, dev_json_path)
-    test_corpus = GoldCorpus(train_json_path, dev_json_path)
+    test_corpus = GoldCorpus(train_json_path, test_json_path)
     if ontonotes_path:
         onto_train_path = os.path.join(ontonotes_path, "train")
old_path: scripts/train_parser_and_tagger.py
rewrite: ReplaceText(target='test_json_path' @(61,46)->(61,59))
initial_state:
def train_parser_and_tagger(train_json_path: str,
    parser = nlp.get_pipe('parser')
    train_corpus = GoldCorpus(train_json_path, dev_json_path)
    test_corpus = GoldCorpus(train_json_path, dev_json_path)
    if ontonotes_path:
        onto_train_path = os.path.join(ontonotes_path, "train")
final_state:
def train_parser_and_tagger(train_json_path: str,
    parser = nlp.get_pipe('parser')
    train_corpus = GoldCorpus(train_json_path, dev_json_path)
    test_corpus = GoldCorpus(train_json_path, test_json_path)
    if ontonotes_path:
        onto_train_path = os.path.join(ontonotes_path, "train")
101
https://:@github.com/allenai/SciSpaCy.git
e15e9b7913459b6139d4f30030a502f30479cfd5
diff:
@@ -190,7 +190,7 @@ def train_parser_and_tagger(train_json_path: str,
         print("Token acc:", scorer_onto_retrained.token_acc)
     with open(os.path.join(model_output_dir, "ontonotes_test.json")) as metric_file:
-        json.dump(scorer.scores, metric_file)
+        json.dump(scorer_onto_retrained.scores, metric_file)
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
old_path: scripts/train_parser_and_tagger.py
rewrite: ReplaceText(target='scorer_onto_retrained' @(193,22)->(193,28))
initial_state:
def train_parser_and_tagger(train_json_path: str,
        print("Token acc:", scorer_onto_retrained.token_acc)
    with open(os.path.join(model_output_dir, "ontonotes_test.json")) as metric_file:
        json.dump(scorer.scores, metric_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
final_state:
def train_parser_and_tagger(train_json_path: str,
        print("Token acc:", scorer_onto_retrained.token_acc)
    with open(os.path.join(model_output_dir, "ontonotes_test.json")) as metric_file:
        json.dump(scorer_onto_retrained.scores, metric_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
102
https://:@github.com/allenai/SciSpaCy.git
47400cbed4d4943f6bba7ed013bf80110f738f2e
diff:
@@ -679,7 +679,7 @@ def eval_candidate_generation_and_linking(examples: List[data_util.MedMentionExa
         if generate_linking_classifier_training_data:
             for candidates, mention_types_for_mention in zip(candidates_by_mention, mention_types_by_mention):
                 for candidate_id, candidate in candidates.items():
-                    classifier_example = linker.classifier_example(candidate_id, candidate, mention_text, mention_types)
+                    classifier_example = linker.classifier_example(candidate_id, candidate, mention_text, mention_types_for_mention)
                     classifier_example['label'] = int(gold_entity.umls_id == candidate_id)
                     linking_classifier_training_data.append(classifier_example)
old_path: scripts/linking.py
rewrite: ReplaceText(target='mention_types_for_mention' @(682,118)->(682,131))
initial_state:
def eval_candidate_generation_and_linking(examples: List[data_util.MedMentionExa
        if generate_linking_classifier_training_data:
            for candidates, mention_types_for_mention in zip(candidates_by_mention, mention_types_by_mention):
                for candidate_id, candidate in candidates.items():
                    classifier_example = linker.classifier_example(candidate_id, candidate, mention_text, mention_types)
                    classifier_example['label'] = int(gold_entity.umls_id == candidate_id)
                    linking_classifier_training_data.append(classifier_example)
final_state:
def eval_candidate_generation_and_linking(examples: List[data_util.MedMentionExa
        if generate_linking_classifier_training_data:
            for candidates, mention_types_for_mention in zip(candidates_by_mention, mention_types_by_mention):
                for candidate_id, candidate in candidates.items():
                    classifier_example = linker.classifier_example(candidate_id, candidate, mention_text, mention_types_for_mention)
                    classifier_example['label'] = int(gold_entity.umls_id == candidate_id)
                    linking_classifier_training_data.append(classifier_example)
103
https://:@github.com/Burnysc2/python-sc2.git
c3f5b0de304727914a2a59d5cfde6dda04071686
diff:
@@ -381,7 +381,7 @@ class BotAI(object):
             return ActionResult.CantFindPlacementLocation
         unit = unit or self.select_build_worker(p)
-        if unit is None or self.can_afford(building):
+        if unit is None or not self.can_afford(building):
             return ActionResult.Error
         return await self.do(unit.build(building, p))
old_path: sc2/bot_ai.py
rewrite: ReplaceText(target='not ' @(384,27)->(384,27))
initial_state:
class BotAI(object):
            return ActionResult.CantFindPlacementLocation
        unit = unit or self.select_build_worker(p)
        if unit is None or self.can_afford(building):
            return ActionResult.Error
        return await self.do(unit.build(building, p))
final_state:
class BotAI(object):
            return ActionResult.CantFindPlacementLocation
        unit = unit or self.select_build_worker(p)
        if unit is None or not self.can_afford(building):
            return ActionResult.Error
        return await self.do(unit.build(building, p))
104
https://:@github.com/Burnysc2/python-sc2.git
20468d57d6fd5cc80c9e7757d75e987bd81e632a
diff:
@@ -185,7 +185,7 @@ class Point2(Pointlike):
         Used in ramp finding """
         assert self != p
         distanceBetweenPoints = self.distance_to(p)
-        assert r > distanceBetweenPoints / 2
+        assert r >= distanceBetweenPoints / 2
         # remaining distance from center towards the intersection, using pythagoras
         remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
         # center of both points
old_path: sc2/position.py
rewrite: ReplaceText(target='>=' @(188,17)->(188,18))
initial_state:
class Point2(Pointlike):
        Used in ramp finding """
        assert self != p
        distanceBetweenPoints = self.distance_to(p)
        assert r > distanceBetweenPoints / 2
        # remaining distance from center towards the intersection, using pythagoras
        remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
        # center of both points
final_state:
class Point2(Pointlike):
        Used in ramp finding """
        assert self != p
        distanceBetweenPoints = self.distance_to(p)
        assert r >= distanceBetweenPoints / 2
        # remaining distance from center towards the intersection, using pythagoras
        remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
        # center of both points
105
https://:@github.com/Burnysc2/python-sc2.git
9470180d3caaa18098ebffea220719ad6083afbe
diff:
@@ -185,7 +185,7 @@ class Point2(Pointlike):
         Used in ramp finding """
         assert self != p
         distanceBetweenPoints = self.distance_to(p)
-        assert r > distanceBetweenPoints / 2
+        assert r >= distanceBetweenPoints / 2
         # remaining distance from center towards the intersection, using pythagoras
         remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
         # center of both points
old_path: sc2/position.py
rewrite: ReplaceText(target='>=' @(188,17)->(188,18))
initial_state:
class Point2(Pointlike):
        Used in ramp finding """
        assert self != p
        distanceBetweenPoints = self.distance_to(p)
        assert r > distanceBetweenPoints / 2
        # remaining distance from center towards the intersection, using pythagoras
        remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
        # center of both points
final_state:
class Point2(Pointlike):
        Used in ramp finding """
        assert self != p
        distanceBetweenPoints = self.distance_to(p)
        assert r >= distanceBetweenPoints / 2
        # remaining distance from center towards the intersection, using pythagoras
        remainingDistanceFromCenter = (r ** 2 - (distanceBetweenPoints / 2) ** 2) ** 0.5
        # center of both points
106
https://:@github.com/Burnysc2/python-sc2.git
9c59f42e1722e1141a51300d472b0385d64f1640
diff:
@@ -86,7 +86,7 @@ class Units(list):
         return self[0]
     def take(self, n: int) -> "Units":
-        if self.amount >= n:
+        if self.amount <= n:
             return self
         else:
             return self.subgroup(self[:n])
old_path: sc2/units.py
rewrite: ReplaceText(target='<=' @(89,23)->(89,25))
initial_state:
class Units(list):
        return self[0]
    def take(self, n: int) -> "Units":
        if self.amount >= n:
            return self
        else:
            return self.subgroup(self[:n])
final_state:
class Units(list):
        return self[0]
    def take(self, n: int) -> "Units":
        if self.amount <= n:
            return self
        else:
            return self.subgroup(self[:n])
107
https://:@github.com/kenjyco/beu.git
4aea6146fc5f01df3e344b9fadddf28b795dac89
diff:
@@ -36,7 +36,7 @@ def _get_settings_file():
     if not os.path.exists(home_config_dir):
         os.makedirs(home_config_dir)
     copyfile(sample_file, settings_file)
-    print('\nCopied settings to {}'.format(repr(home_config_dir)))
+    print('\nCopied settings to {}'.format(repr(settings_file)))
     return settings_file
old_path: beu/__init__.py
rewrite: ReplaceText(target='settings_file' @(39,48)->(39,63))
initial_state:
def _get_settings_file():
    if not os.path.exists(home_config_dir):
        os.makedirs(home_config_dir)
    copyfile(sample_file, settings_file)
    print('\nCopied settings to {}'.format(repr(home_config_dir)))
    return settings_file
final_state:
def _get_settings_file():
    if not os.path.exists(home_config_dir):
        os.makedirs(home_config_dir)
    copyfile(sample_file, settings_file)
    print('\nCopied settings to {}'.format(repr(settings_file)))
    return settings_file
108
https://:@github.com/WattTime/pyiso.git
009316799f872c90f50b73e41f3a6f9021f8b2cc
diff:
@@ -111,7 +111,7 @@ class BaseClient(object):
         self.options.update(kwargs)
         # check start_at and end_at args
-        if self.options.get('start_at', None) and self.options.get('end_at', None):
+        if self.options.get('start_at', None) or self.options.get('end_at', None):
             assert self.options['start_at'] < self.options['end_at']
             self.options['start_at'] = self.utcify(self.options['start_at'])
             self.options['end_at'] = self.utcify(self.options['end_at'])
old_path: pyiso/base.py
rewrite: ReplaceText(target='or' @(114,46)->(114,49))
initial_state:
class BaseClient(object):
        self.options.update(kwargs)
        # check start_at and end_at args
        if self.options.get('start_at', None) and self.options.get('end_at', None):
            assert self.options['start_at'] < self.options['end_at']
            self.options['start_at'] = self.utcify(self.options['start_at'])
            self.options['end_at'] = self.utcify(self.options['end_at'])
final_state:
class BaseClient(object):
        self.options.update(kwargs)
        # check start_at and end_at args
        if self.options.get('start_at', None) or self.options.get('end_at', None):
            assert self.options['start_at'] < self.options['end_at']
            self.options['start_at'] = self.utcify(self.options['start_at'])
            self.options['end_at'] = self.utcify(self.options['end_at'])
109
https://:@github.com/WattTime/pyiso.git
31c2680f1e96d6affbbef6dc75b9ab3724a7d8b9
diff:
@@ -117,7 +117,7 @@ class BaseClient(object):
         self.options.update(kwargs)
         # check start_at and end_at args
-        if self.options.get('start_at', None) or self.options.get('end_at', None):
+        if self.options.get('start_at', None) and self.options.get('end_at', None):
             assert self.options['start_at'] < self.options['end_at']
             self.options['start_at'] = self.utcify(self.options['start_at'])
             self.options['end_at'] = self.utcify(self.options['end_at'])
old_path: pyiso/base.py
rewrite: ReplaceText(target='and' @(120,46)->(120,48))
initial_state:
class BaseClient(object):
        self.options.update(kwargs)
        # check start_at and end_at args
        if self.options.get('start_at', None) or self.options.get('end_at', None):
            assert self.options['start_at'] < self.options['end_at']
            self.options['start_at'] = self.utcify(self.options['start_at'])
            self.options['end_at'] = self.utcify(self.options['end_at'])
final_state:
class BaseClient(object):
        self.options.update(kwargs)
        # check start_at and end_at args
        if self.options.get('start_at', None) and self.options.get('end_at', None):
            assert self.options['start_at'] < self.options['end_at']
            self.options['start_at'] = self.utcify(self.options['start_at'])
            self.options['end_at'] = self.utcify(self.options['end_at'])
110
https://:@github.com/WattTime/pyiso.git
14fd34daa69230352ecb651ee8f30a4566ab6c59
diff:
@@ -18,7 +18,7 @@ class TestGenerationTask(TestCase):
         expected = client_factory(ba).get_generation(**kwargs)
         received = tasks.get_generation(ba, **kwargs)
-        for i in range(len(expected)):
+        for i in range(len(received)):
             if expected[i]['timestamp'] == received[i]['timestamp']:
                 self.assertEqual(expected[i]['gen_MW'], received[i]['gen_MW'])
                 self.assertEqual(expected[i]['fuel_name'], received[i]['fuel_name'])
old_path: tests/test_tasks.py
rewrite: ReplaceText(target='received' @(21,27)->(21,35))
initial_state:
class TestGenerationTask(TestCase):
        expected = client_factory(ba).get_generation(**kwargs)
        received = tasks.get_generation(ba, **kwargs)
        for i in range(len(expected)):
            if expected[i]['timestamp'] == received[i]['timestamp']:
                self.assertEqual(expected[i]['gen_MW'], received[i]['gen_MW'])
                self.assertEqual(expected[i]['fuel_name'], received[i]['fuel_name'])
final_state:
class TestGenerationTask(TestCase):
        expected = client_factory(ba).get_generation(**kwargs)
        received = tasks.get_generation(ba, **kwargs)
        for i in range(len(received)):
            if expected[i]['timestamp'] == received[i]['timestamp']:
                self.assertEqual(expected[i]['gen_MW'], received[i]['gen_MW'])
                self.assertEqual(expected[i]['fuel_name'], received[i]['fuel_name'])
111
https://:@github.com/TTWShell/hobbit-core.git
b51217f7fc8cb238c1dc09e8932178cda40cf2b4
diff:
@@ -133,4 +133,4 @@ class TestTransaction(BaseTest):
         with pytest.raises(Exception):
             view_func2()
-        assert len(User.query.all()) == 1
+        assert len(User.query.all()) == 0
old_path: tests/test_db.py
rewrite: ReplaceText(target='0' @(136,40)->(136,41))
initial_state:
class TestTransaction(BaseTest):
        with pytest.raises(Exception):
            view_func2()
        assert len(User.query.all()) == 1
final_state:
class TestTransaction(BaseTest):
        with pytest.raises(Exception):
            view_func2()
        assert len(User.query.all()) == 0
112
https://:@github.com/ktdreyer/jenkins-job-wrecker.git
3be4ea2d49a46cec8223361142305107b88ca889
diff:
@@ -162,7 +162,7 @@ def prebuildcleanup(top, parent):
             pass
         else:
             raise NotImplementedError("cannot handle "
-                                      "XML %s" % subelement.tag)
+                                      "XML %s" % element.tag)
     for rule in preclean_patterns:
         if preclean_patterns[rule] is not None and len(preclean_patterns[rule]) > 0:
old_path: jenkins_job_wrecker/modules/buildwrappers.py
rewrite: ReplaceText(target='element' @(165,49)->(165,59))
initial_state:
def prebuildcleanup(top, parent):
            pass
        else:
            raise NotImplementedError("cannot handle "
                                      "XML %s" % subelement.tag)
    for rule in preclean_patterns:
        if preclean_patterns[rule] is not None and len(preclean_patterns[rule]) > 0:
final_state:
def prebuildcleanup(top, parent):
            pass
        else:
            raise NotImplementedError("cannot handle "
                                      "XML %s" % element.tag)
    for rule in preclean_patterns:
        if preclean_patterns[rule] is not None and len(preclean_patterns[rule]) > 0:
113
https://:@github.com/Sage-Bionetworks/Genie.git
7a6ff590425d32fd596687bd0d35bc98680fc5b8
diff:
@@ -379,7 +379,7 @@ class clinical(example_filetype_format.FileTypeFormat):
         patient_patients = clinicalDF[patientId][clinicalDF[patientId] != ""]
         # #CHECK: All samples must have associated patient data (GENIE requires patient data)
         if not all(sample_patients.isin(patient_patients)):
-            total_error += "Sample: All samples must have associated patient information. These samples are missing patient data: %s\n" % ", ".join(clinicalSampleDF[patientId][~clinicalSampleDF[patientId].isin(clinicalDF[patientId])])
+            total_error += "Sample: All samples must have associated patient information. These samples are missing patient data: %s\n" % ", ".join(clinicalSampleDF[sampleId][~clinicalSampleDF[patientId].isin(clinicalDF[patientId])])
         #CHECK: All patients must have associated sample data
         if not all(patient_patients.isin(sample_patients)):
             ### MAKE WARNING FOR NOW###
old_path: processing/clinical.py
rewrite: ReplaceText(target='sampleId' @(382,157)->(382,166))
initial_state:
class clinical(example_filetype_format.FileTypeFormat):
        patient_patients = clinicalDF[patientId][clinicalDF[patientId] != ""]
        # #CHECK: All samples must have associated patient data (GENIE requires patient data)
        if not all(sample_patients.isin(patient_patients)):
            total_error += "Sample: All samples must have associated patient information. These samples are missing patient data: %s\n" % ", ".join(clinicalSampleDF[patientId][~clinicalSampleDF[patientId].isin(clinicalDF[patientId])])
        #CHECK: All patients must have associated sample data
        if not all(patient_patients.isin(sample_patients)):
            ### MAKE WARNING FOR NOW###
final_state:
class clinical(example_filetype_format.FileTypeFormat):
        patient_patients = clinicalDF[patientId][clinicalDF[patientId] != ""]
        # #CHECK: All samples must have associated patient data (GENIE requires patient data)
        if not all(sample_patients.isin(patient_patients)):
            total_error += "Sample: All samples must have associated patient information. These samples are missing patient data: %s\n" % ", ".join(clinicalSampleDF[sampleId][~clinicalSampleDF[patientId].isin(clinicalDF[patientId])])
        #CHECK: All patients must have associated sample data
        if not all(patient_patients.isin(sample_patients)):
            ### MAKE WARNING FOR NOW###
114
https://:@github.com/Sage-Bionetworks/Genie.git
b5b670e4a5797ba2a66caa517bbe2160fec645f7
diff:
@@ -209,7 +209,7 @@ def createMafDatabase(syn, databaseToSynIdMappingDf,testing=False,staging=False)
     #Make sure to store the newly created maf db synid into the staging synapse mapping
     databaseToSynIdMapping = syn.tableQuery("SELECT * FROM syn12094210 where Database = 'vcf2maf'")
     databaseToSynIdMappingDf = databaseToSynIdMapping.asDataFrame()
-    databaseToSynIdMapping['Id'][0] = newMafDb.id
+    databaseToSynIdMappingDf['Id'][0] = newMafDb.id
     syn.store(synapseclient.Table("syn12094210",databaseToSynIdMappingDf))
     #Move and archive old mafdatabase
     mafDatabaseEnt.parentId = "syn7208886"
old_path: processing/input_to_database.py
rewrite: ReplaceText(target='databaseToSynIdMappingDf' @(212,2)->(212,24))
initial_state:
def createMafDatabase(syn, databaseToSynIdMappingDf,testing=False,staging=False)
    #Make sure to store the newly created maf db synid into the staging synapse mapping
    databaseToSynIdMapping = syn.tableQuery("SELECT * FROM syn12094210 where Database = 'vcf2maf'")
    databaseToSynIdMappingDf = databaseToSynIdMapping.asDataFrame()
    databaseToSynIdMapping['Id'][0] = newMafDb.id
    syn.store(synapseclient.Table("syn12094210",databaseToSynIdMappingDf))
    #Move and archive old mafdatabase
    mafDatabaseEnt.parentId = "syn7208886"
final_state:
def createMafDatabase(syn, databaseToSynIdMappingDf,testing=False,staging=False)
    #Make sure to store the newly created maf db synid into the staging synapse mapping
    databaseToSynIdMapping = syn.tableQuery("SELECT * FROM syn12094210 where Database = 'vcf2maf'")
    databaseToSynIdMappingDf = databaseToSynIdMapping.asDataFrame()
    databaseToSynIdMappingDf['Id'][0] = newMafDb.id
    syn.store(synapseclient.Table("syn12094210",databaseToSynIdMappingDf))
    #Move and archive old mafdatabase
    mafDatabaseEnt.parentId = "syn7208886"
115
https://:@github.com/Sage-Bionetworks/Genie.git
78fcacd10d4c44c90f41893a161de24ff13ea774
diff:
@@ -311,7 +311,7 @@ class clinical(FileTypeFormat):
         haveColumn = process_functions.checkColExist(clinicalDF, "SEQ_ASSAY_ID")
         if haveColumn:
             if not all([i != "" for i in clinicalDF['SEQ_ASSAY_ID']]):
-                total_error += "Sample: Please double check your SEQ_ASSAY_ID columns, there are empty rows.\n"
+                warning += "Sample: Please double check your SEQ_ASSAY_ID columns, there are empty rows.\n"
             #must remove empty seq assay ids first
             #Checking if seq assay ids start with the center name
             seqAssayIds = clinicalDF.SEQ_ASSAY_ID[clinicalDF.SEQ_ASSAY_ID != ""]
old_path: genie/clinical.py
rewrite: ReplaceText(target='warning' @(314,4)->(314,15))
initial_state:
class clinical(FileTypeFormat):
        haveColumn = process_functions.checkColExist(clinicalDF, "SEQ_ASSAY_ID")
        if haveColumn:
            if not all([i != "" for i in clinicalDF['SEQ_ASSAY_ID']]):
                total_error += "Sample: Please double check your SEQ_ASSAY_ID columns, there are empty rows.\n"
            #must remove empty seq assay ids first
            #Checking if seq assay ids start with the center name
            seqAssayIds = clinicalDF.SEQ_ASSAY_ID[clinicalDF.SEQ_ASSAY_ID != ""]
final_state:
class clinical(FileTypeFormat):
        haveColumn = process_functions.checkColExist(clinicalDF, "SEQ_ASSAY_ID")
        if haveColumn:
            if not all([i != "" for i in clinicalDF['SEQ_ASSAY_ID']]):
                warning += "Sample: Please double check your SEQ_ASSAY_ID columns, there are empty rows.\n"
            #must remove empty seq assay ids first
            #Checking if seq assay ids start with the center name
            seqAssayIds = clinicalDF.SEQ_ASSAY_ID[clinicalDF.SEQ_ASSAY_ID != ""]
116
https://:@github.com/Sage-Bionetworks/Genie.git
e115f3884db224a231c87b346f04d6edbf66d6bb
diff:
@@ -87,7 +87,7 @@ class vcf(maf.maf):
         tumor = "TUMOR"
         normal = "NORMAL"
         # ### If the tumor name isn't TUMOR, set the sample id to be the tumor name
-        if tumor != "TUMOR":
+        if tumor == "TUMOR":
             tumorName = vcfName.replace(".vcf","")
         else:
             tumorName = tumor
old_path: genie/vcf.py
rewrite: ReplaceText(target='==' @(90,12)->(90,14))
initial_state:
class vcf(maf.maf):
        tumor = "TUMOR"
        normal = "NORMAL"
        # ### If the tumor name isn't TUMOR, set the sample id to be the tumor name
        if tumor != "TUMOR":
            tumorName = vcfName.replace(".vcf","")
        else:
            tumorName = tumor
final_state:
class vcf(maf.maf):
        tumor = "TUMOR"
        normal = "NORMAL"
        # ### If the tumor name isn't TUMOR, set the sample id to be the tumor name
        if tumor == "TUMOR":
            tumorName = vcfName.replace(".vcf","")
        else:
            tumorName = tumor
117
https://:@github.com/Sage-Bionetworks/Genie.git
4c861c5eccebb0d0a402d85b658924df1e6a8819
diff:
@@ -247,7 +247,7 @@ def validation(syn, center, process, center_mapping_df, databaseToSynIdMappingDf
     duplicatedFiles = duplicatedFiles.append(cbsSegFiles)
     clinical_bool = ["clinical" in i for i in inputValidStatus['name']]
     clinical_files = inputValidStatus[clinical_bool]
-    if len(clinical_bool) > 2:
+    if len(clinical_files) > 2:
         duplicatedFiles = duplicatedFiles.append(clinical_files)
     # nodups = ["data_mutations_extended"]
old_path: genie/input_to_database.py
rewrite: ReplaceText(target='clinical_files' @(250,15)->(250,28))
initial_state:
def validation(syn, center, process, center_mapping_df, databaseToSynIdMappingDf
    duplicatedFiles = duplicatedFiles.append(cbsSegFiles)
    clinical_bool = ["clinical" in i for i in inputValidStatus['name']]
    clinical_files = inputValidStatus[clinical_bool]
    if len(clinical_bool) > 2:
        duplicatedFiles = duplicatedFiles.append(clinical_files)
    # nodups = ["data_mutations_extended"]
final_state:
def validation(syn, center, process, center_mapping_df, databaseToSynIdMappingDf
    duplicatedFiles = duplicatedFiles.append(cbsSegFiles)
    clinical_bool = ["clinical" in i for i in inputValidStatus['name']]
    clinical_files = inputValidStatus[clinical_bool]
    if len(clinical_files) > 2:
        duplicatedFiles = duplicatedFiles.append(clinical_files)
    # nodups = ["data_mutations_extended"]
118
https://:@github.com/Sage-Bionetworks/Genie.git
39f33cd754223ee515b8d790c0cd96b32f36b88e
diff:
@@ -270,7 +270,7 @@ def validatefile(syn, entities, validation_status_table, error_tracker_table,
     input_status_list, invalid_errors_list = _get_status_and_error_list(
         valid, message, entities)
     # Send email the first time the file is invalid
-    if invalid_errors_list:
+    if not invalid_errors_list:
         _send_validation_error_email(syn, filenames, message, file_users)
     else:
         input_status_list = [
old_path: genie/input_to_database.py
rewrite: ReplaceText(target='not ' @(273,11)->(273,11))
initial_state:
def validatefile(syn, entities, validation_status_table, error_tracker_table,
    input_status_list, invalid_errors_list = _get_status_and_error_list(
        valid, message, entities)
    # Send email the first time the file is invalid
    if invalid_errors_list:
        _send_validation_error_email(syn, filenames, message, file_users)
    else:
        input_status_list = [
final_state:
def validatefile(syn, entities, validation_status_table, error_tracker_table,
    input_status_list, invalid_errors_list = _get_status_and_error_list(
        valid, message, entities)
    # Send email the first time the file is invalid
    if not invalid_errors_list:
        _send_validation_error_email(syn, filenames, message, file_users)
    else:
        input_status_list = [
119
https://:@github.com/Sage-Bionetworks/Genie.git
e140dee708542afa35c5d1e30671d8083fcbcd29
diff:
@@ -512,7 +512,7 @@ class bed(FileTypeFormat):
             string: Path to new bed file
         """
         final_beddf = self._process(beddf, seq_assay_id, newPath, parentId)
-        process_functions.updateData(self.syn, databaseSynId, beddf,
+        process_functions.updateData(self.syn, databaseSynId, final_beddf,
                                      seq_assay_id,
                                      filterByColumn="SEQ_ASSAY_ID",
                                      toDelete=True)
old_path: genie/bed.py
rewrite: ReplaceText(target='final_beddf' @(515,62)->(515,67))
initial_state:
class bed(FileTypeFormat):
            string: Path to new bed file
        """
        final_beddf = self._process(beddf, seq_assay_id, newPath, parentId)
        process_functions.updateData(self.syn, databaseSynId, beddf,
                                     seq_assay_id,
                                     filterByColumn="SEQ_ASSAY_ID",
                                     toDelete=True)
final_state:
class bed(FileTypeFormat):
            string: Path to new bed file
        """
        final_beddf = self._process(beddf, seq_assay_id, newPath, parentId)
        process_functions.updateData(self.syn, databaseSynId, final_beddf,
                                     seq_assay_id,
                                     filterByColumn="SEQ_ASSAY_ID",
                                     toDelete=True)
120
https://:@github.com/Libensemble/libensemble.git
a7175c6bf59803deb4a52aaf8c0185b93874ff21
diff:
@@ -96,7 +96,7 @@ def decide_work_and_resources(active_w, idle_w, H, H_ind, sim_specs, gen_specs):
             'form_subcomm': [],
             'calc_in': H[sim_specs['in']][inds_to_send],
             'calc_out': sim_specs['out'],
-            'calc_info': {'type':'sim', 'pt_ids': q_inds},
+            'calc_info': {'type':'sim', 'pt_ids': inds_to_send},
         }
         update_history_x_out(H, q_inds, Work[i]['calc_in'], i, sim_specs['params'])
old_path: code/src/libE_manager.py
rewrite: ReplaceText(target='inds_to_send' @(99,61)->(99,67))
initial_state:
def decide_work_and_resources(active_w, idle_w, H, H_ind, sim_specs, gen_specs):
            'form_subcomm': [],
            'calc_in': H[sim_specs['in']][inds_to_send],
            'calc_out': sim_specs['out'],
            'calc_info': {'type':'sim', 'pt_ids': q_inds},
        }
        update_history_x_out(H, q_inds, Work[i]['calc_in'], i, sim_specs['params'])
final_state:
def decide_work_and_resources(active_w, idle_w, H, H_ind, sim_specs, gen_specs):
            'form_subcomm': [],
            'calc_in': H[sim_specs['in']][inds_to_send],
            'calc_out': sim_specs['out'],
            'calc_info': {'type':'sim', 'pt_ids': inds_to_send},
        }
        update_history_x_out(H, q_inds, Work[i]['calc_in'], i, sim_specs['params'])
121
https://:@github.com/Libensemble/libensemble.git
ad5e021d040efd28a61d6afa2be908e25f363bd4
diff:
@@ -52,7 +52,7 @@ def worker_main(c, sim_specs, gen_specs):
         if tag_out == STOP_TAG: break
-        comm.send(obj=data_out, dest=0, tag=calc_tag)
+        comm.send(obj=data_out, dest=0, tag=tag_out)
     # Clean up
     for loc in locations.values():
old_path: code/src/libE_worker.py
rewrite: ReplaceText(target='tag_out' @(55,44)->(55,52))
initial_state:
def worker_main(c, sim_specs, gen_specs):
        if tag_out == STOP_TAG: break
        comm.send(obj=data_out, dest=0, tag=calc_tag)
    # Clean up
    for loc in locations.values():
final_state:
def worker_main(c, sim_specs, gen_specs):
        if tag_out == STOP_TAG: break
        comm.send(obj=data_out, dest=0, tag=tag_out)
    # Clean up
    for loc in locations.values():
122
https://:@github.com/Libensemble/libensemble.git
2b33970bdac3034cc7799c2849eae09f9c22fcf6
diff:
@@ -40,7 +40,7 @@ class MPIComm(Comm):
     def kill_pending(self):
         "Make sure pending requests are cancelled if the comm is killed."
         for req in self._outbox:
-            if req.Test():
+            if not req.Test():
                 req.Cancel()
         self._outbox = []
old_path: libensemble/mpi_comms.py
rewrite: ReplaceText(target='not ' @(43,15)->(43,15))
initial_state:
class MPIComm(Comm):
    def kill_pending(self):
        "Make sure pending requests are cancelled if the comm is killed."
        for req in self._outbox:
            if req.Test():
                req.Cancel()
        self._outbox = []
final_state:
class MPIComm(Comm):
    def kill_pending(self):
        "Make sure pending requests are cancelled if the comm is killed."
        for req in self._outbox:
            if not req.Test():
                req.Cancel()
        self._outbox = []
123
https://:@github.com/Libensemble/libensemble.git
155ba0c0517046e8ec305e76862bc6f0a44fafc7
diff:
@@ -136,7 +136,7 @@ class EnvResources:
             nidstr = splitstr[1].strip("]")
             nidlst = EnvResources._noderange_append(prefix, nidstr)
         else: # Multiple Partitions
-            splitgroups = [str.split('[', 1) for str in splitstr]
+            splitgroups = [str.split('[', 1) for str in part_splitstr]
             prefixgroups = [group[0] for group in splitgroups]
             nodegroups = [group[1].strip(']') for group in splitgroups]
             nidlst = []
old_path: libensemble/env_resources.py
rewrite: ReplaceText(target='part_splitstr' @(139,56)->(139,64))
initial_state:
class EnvResources:
            nidstr = splitstr[1].strip("]")
            nidlst = EnvResources._noderange_append(prefix, nidstr)
        else: # Multiple Partitions
            splitgroups = [str.split('[', 1) for str in splitstr]
            prefixgroups = [group[0] for group in splitgroups]
            nodegroups = [group[1].strip(']') for group in splitgroups]
            nidlst = []
final_state:
class EnvResources:
            nidstr = splitstr[1].strip("]")
            nidlst = EnvResources._noderange_append(prefix, nidstr)
        else: # Multiple Partitions
            splitgroups = [str.split('[', 1) for str in part_splitstr]
            prefixgroups = [group[0] for group in splitgroups]
            nodegroups = [group[1].strip(']') for group in splitgroups]
            nidlst = []
124
https://:@github.com/Libensemble/libensemble.git
c89e4f0b72361ba84884a3c691d73d8f62b98014
diff:
@@ -108,7 +108,7 @@ def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
             last_size = persis_info.get('last_size')
             if len(H):
                 # Don't give gen instances in batch mode if points are unfinished
-                if (gen_specs['user'].get('batch_mode')
+                if (alloc_specs['user'].get('batch_mode')
                         and not all(np.logical_or(H['returned'][last_size:], H['paused'][last_size:]))):
                     break
old_path: libensemble/alloc_funcs/fast_alloc_and_pausing.py
rewrite: ReplaceText(target='alloc_specs' @(111,20)->(111,29))
initial_state:
def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
            last_size = persis_info.get('last_size')
            if len(H):
                # Don't give gen instances in batch mode if points are unfinished
                if (gen_specs['user'].get('batch_mode')
                        and not all(np.logical_or(H['returned'][last_size:], H['paused'][last_size:]))):
                    break
final_state:
def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
            last_size = persis_info.get('last_size')
            if len(H):
                # Don't give gen instances in batch mode if points are unfinished
                if (alloc_specs['user'].get('batch_mode')
                        and not all(np.logical_or(H['returned'][last_size:], H['paused'][last_size:]))):
                    break
125
https://:@github.com/Libensemble/libensemble.git
c89e4f0b72361ba84884a3c691d73d8f62b98014
diff:
@@ -33,7 +33,7 @@ def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
             last_size = persis_info.get('last_size')
             if len(H):
                 # Don't give gen instances in batch mode if points are unfinished
-                if (gen_specs['user'].get('batch_mode')
+                if (alloc_specs['user'].get('batch_mode')
                         and not all(np.logical_or(H['returned'][last_size:], H['paused'][last_size:]))):
                     break
old_path: libensemble/alloc_funcs/fast_alloc_to_aposmm.py
rewrite: ReplaceText(target='alloc_specs' @(36,20)->(36,29))
initial_state:
def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
            last_size = persis_info.get('last_size')
            if len(H):
                # Don't give gen instances in batch mode if points are unfinished
                if (gen_specs['user'].get('batch_mode')
                        and not all(np.logical_or(H['returned'][last_size:], H['paused'][last_size:]))):
                    break
final_state:
def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
            last_size = persis_info.get('last_size')
            if len(H):
                # Don't give gen instances in batch mode if points are unfinished
                if (alloc_specs['user'].get('batch_mode')
                        and not all(np.logical_or(H['returned'][last_size:], H['paused'][last_size:]))):
                    break
126
https://:@github.com/Libensemble/libensemble.git
c89e4f0b72361ba84884a3c691d73d8f62b98014
diff:
@@ -73,7 +73,7 @@ def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
             # No gen instances in batch mode if workers still working
             still_working = ~H['returned']
-            if gen_specs['user'].get('batch_mode') and np.any(still_working):
+            if alloc_specs['user'].get('batch_mode') and np.any(still_working):
                 break
             # Give gen work
old_path: libensemble/alloc_funcs/give_sim_work_first.py
rewrite: ReplaceText(target='alloc_specs' @(76,15)->(76,24))
initial_state:
def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
            # No gen instances in batch mode if workers still working
            still_working = ~H['returned']
            if gen_specs['user'].get('batch_mode') and np.any(still_working):
                break
            # Give gen work
final_state:
def give_sim_work_first(W, H, sim_specs, gen_specs, alloc_specs, persis_info):
            # No gen instances in batch mode if workers still working
            still_working = ~H['returned']
            if alloc_specs['user'].get('batch_mode') and np.any(still_working):
                break
            # Give gen work
127
https://:@github.com/Libensemble/libensemble.git
55f4a8f05ade4709b53cbe9ef92fe00a0e2e9a79
diff:
@@ -59,7 +59,7 @@ def try_and_run_nlopt(H, gen_specs, libE_info):
                                gen_specs['user']['ub'], gen_specs['user']['lb'], local=True, active=True)
             tag, Work, calc_in = sendrecv_mgr_worker_msg(comm, H_o)
             if tag in [STOP_TAG, PERSIS_STOP]:
-                nlopt.forced_stop.message = 'tag=' + str(tag)
+                nlopt.forced_stop.message = 'tag=' + str(Work)
                 raise nlopt.forced_stop
         # Return function value (and maybe gradient)
old_path: libensemble/gen_funcs/uniform_or_localopt.py
rewrite: ReplaceText(target='Work' @(62,53)->(62,56))
initial_state:
def try_and_run_nlopt(H, gen_specs, libE_info):
                               gen_specs['user']['ub'], gen_specs['user']['lb'], local=True, active=True)
            tag, Work, calc_in = sendrecv_mgr_worker_msg(comm, H_o)
            if tag in [STOP_TAG, PERSIS_STOP]:
                nlopt.forced_stop.message = 'tag=' + str(tag)
                raise nlopt.forced_stop
        # Return function value (and maybe gradient)
final_state:
def try_and_run_nlopt(H, gen_specs, libE_info):
                               gen_specs['user']['ub'], gen_specs['user']['lb'], local=True, active=True)
            tag, Work, calc_in = sendrecv_mgr_worker_msg(comm, H_o)
            if tag in [STOP_TAG, PERSIS_STOP]:
                nlopt.forced_stop.message = 'tag=' + str(Work)
                raise nlopt.forced_stop
        # Return function value (and maybe gradient)
128
https://:@github.com/Libensemble/libensemble.git
c090514bb370c6960db4c8be2ae643534def8d2b
diff:
@@ -24,4 +24,4 @@ def persistent_uniform(H, persis_info, gen_specs, libE_info):
         H_o['x'] = persis_info['rand_stream'].uniform(lb, ub, (b, n))
         tag, Work, calc_in = sendrecv_mgr_worker_msg(libE_info['comm'], H_o)
-    return H_o, persis_info, tag
+    return H_o, persis_info, Work
old_path: libensemble/gen_funcs/persistent_uniform_sampling.py
rewrite: ReplaceText(target='Work' @(27,29)->(27,32))
initial_state:
def persistent_uniform(H, persis_info, gen_specs, libE_info):
        H_o['x'] = persis_info['rand_stream'].uniform(lb, ub, (b, n))
        tag, Work, calc_in = sendrecv_mgr_worker_msg(libE_info['comm'], H_o)
    return H_o, persis_info, tag
final_state:
def persistent_uniform(H, persis_info, gen_specs, libE_info):
        H_o['x'] = persis_info['rand_stream'].uniform(lb, ub, (b, n))
        tag, Work, calc_in = sendrecv_mgr_worker_msg(libE_info['comm'], H_o)
    return H_o, persis_info, Work
129
https://:@github.com/davedittrich/python_secrets.git
3a477f63bb9d0417e63254ba993025a8666c3e1d
diff:
@@ -181,7 +181,7 @@ class PythonSecretsApp(App):
         self.environment = self.options.environment
         self.secrets_basedir = self.options.secrets_basedir
         # Don't output error messages when "complete" command used
-        if cmd.cmd_name == 'complete':
+        if cmd.cmd_name != 'complete':
             SecretsEnvironment.permissions_check(
                 self.secrets_basedir,
                 verbose_level=self.options.verbose_level,
old_path: psec/main.py
rewrite: ReplaceText(target='!=' @(184,24)->(184,26))
initial_state:
class PythonSecretsApp(App):
        self.environment = self.options.environment
        self.secrets_basedir = self.options.secrets_basedir
        # Don't output error messages when "complete" command used
        if cmd.cmd_name == 'complete':
            SecretsEnvironment.permissions_check(
                self.secrets_basedir,
                verbose_level=self.options.verbose_level,
final_state:
class PythonSecretsApp(App):
        self.environment = self.options.environment
        self.secrets_basedir = self.options.secrets_basedir
        # Don't output error messages when "complete" command used
        if cmd.cmd_name != 'complete':
            SecretsEnvironment.permissions_check(
                self.secrets_basedir,
                verbose_level=self.options.verbose_level,
130
https://:@github.com/junzis/pyModeS.git
b2940af6efe4cb6d99bca8a1f1f214292acf3e6a
diff:
@@ -79,7 +79,7 @@ def cpr2position(cprlat0, cprlat1, cprlon0, cprlon1, t0, t1):
     cprlat_even = cprlat0 / 131072.0
     cprlat_odd = cprlat1 / 131072.0
     cprlon_even = cprlon0 / 131072.0
-    cprlon_odd = cprlon0 / 131072.0
+    cprlon_odd = cprlon1 / 131072.0
     air_d_lat_even = 360.0 / 60
     air_d_lat_odd = 360.0 / 59
old_path: decoder.py
rewrite: ReplaceText(target='cprlon1' @(82,18)->(82,25))
initial_state:
def cpr2position(cprlat0, cprlat1, cprlon0, cprlon1, t0, t1):
    cprlat_even = cprlat0 / 131072.0
    cprlat_odd = cprlat1 / 131072.0
    cprlon_even = cprlon0 / 131072.0
    cprlon_odd = cprlon0 / 131072.0
    air_d_lat_even = 360.0 / 60
    air_d_lat_odd = 360.0 / 59
final_state:
def cpr2position(cprlat0, cprlat1, cprlon0, cprlon1, t0, t1):
    cprlat_even = cprlat0 / 131072.0
    cprlat_odd = cprlat1 / 131072.0
    cprlon_even = cprlon0 / 131072.0
    cprlon_odd = cprlon1 / 131072.0
    air_d_lat_even = 360.0 / 60
    air_d_lat_odd = 360.0 / 59
131
https://:@github.com/icb-dcm/pyabc.git
2df9fbbc034ecad45cde0e14d0205fc70ae9b90b
diff:
@@ -456,5 +456,5 @@ class ABCSMC:
             return
         for m in self.history.alive_models(t - 1):
-            particles, w = self.history.get_distribution(t - 1, m)
+            particles, w = self.history.get_distribution(m, t - 1)
             self.transitions[m].fit(particles, w)
old_path: pyabc/smc.py
rewrite: ArgSwap(idxs=0<->1 @(459,27)->(459,56))
initial_state:
class ABCSMC:
            return
        for m in self.history.alive_models(t - 1):
            particles, w = self.history.get_distribution(t - 1, m)
            self.transitions[m].fit(particles, w)
final_state:
class ABCSMC:
            return
        for m in self.history.alive_models(t - 1):
            particles, w = self.history.get_distribution(m, t - 1)
            self.transitions[m].fit(particles, w)
132
https://:@github.com/icb-dcm/pyabc.git
2df9fbbc034ecad45cde0e14d0205fc70ae9b90b
diff:
@@ -108,7 +108,7 @@ def abc_model(abc_id, model_id, t):
         t = history.max_t
     else:
         t = int(t)
-    df, w = history.get_distribution(t, model_id)
+    df, w = history.get_distribution(model_id, t)
     df["CDF"] = w
     tabs = []
old_path: pyabc/visserver/server.py
rewrite: ArgSwap(idxs=0<->1 @(111,12)->(111,36))
initial_state:
def abc_model(abc_id, model_id, t):
        t = history.max_t
    else:
        t = int(t)
    df, w = history.get_distribution(t, model_id)
    df["CDF"] = w
    tabs = []
final_state:
def abc_model(abc_id, model_id, t):
        t = history.max_t
    else:
        t = int(t)
    df, w = history.get_distribution(model_id, t)
    df["CDF"] = w
    tabs = []
133
https://:@github.com/icb-dcm/pyabc.git
2df9fbbc034ecad45cde0e14d0205fc70ae9b90b
diff:
@@ -122,7 +122,7 @@ def test_dataframe_storage_readout():
             for m in range(5):
                 pop = pops[(h, m, t)]
                 expected_particles_list = [p.parameter for p in pop]
-                pars_df, w = h.get_distribution(t, m)
+                pars_df, w = h.get_distribution(m, t)
                 # use range(len and not zip on dataframe to not stop early
                 # in case of population not completely stored
                 assert np.isclose(w.sum(), 1)
old_path: test/test_storage.py
rewrite: ArgSwap(idxs=0<->1 @(125,29)->(125,47))
initial_state:
def test_dataframe_storage_readout():
            for m in range(5):
                pop = pops[(h, m, t)]
                expected_particles_list = [p.parameter for p in pop]
                pars_df, w = h.get_distribution(t, m)
                # use range(len and not zip on dataframe to not stop early
                # in case of population not completely stored
                assert np.isclose(w.sum(), 1)
final_state:
def test_dataframe_storage_readout():
            for m in range(5):
                pop = pops[(h, m, t)]
                expected_particles_list = [p.parameter for p in pop]
                pars_df, w = h.get_distribution(m, t)
                # use range(len and not zip on dataframe to not stop early
                # in case of population not completely stored
                assert np.isclose(w.sum(), 1)
134
https://:@github.com/llllllllll/codetransformer.git
7c327683df810265d01a995d2704c9f8218b0ef7
diff:
@@ -141,7 +141,7 @@ class Instruction(InstructionMeta._marker, metaclass=InstructionMeta):
             'little',
         )
-        yield cls(arg)
+        yield instr(arg)
     @classmethod
     def from_opcode(cls, opcode):
old_path: codetransformer/instructions.py
rewrite: ReplaceText(target='instr' @(144,18)->(144,21))
initial_state:
class Instruction(InstructionMeta._marker, metaclass=InstructionMeta):
            'little',
        )
        yield cls(arg)
    @classmethod
    def from_opcode(cls, opcode):
final_state:
class Instruction(InstructionMeta._marker, metaclass=InstructionMeta):
            'little',
        )
        yield instr(arg)
    @classmethod
    def from_opcode(cls, opcode):
135
https://:@github.com/richardkiss/pycoin.git
c6b3b2e0d7167d4566dc1d90258d6b98dba8bb65
diff:
@@ -197,7 +197,7 @@ def eval_script(script, signature_for_hash_type_f, lock_time, expected_hash_type
                 # Subset of script starting at the most recent codeseparator
                 op_checksig(stack, signature_for_hash_type_f, expected_hash_type, script[begin_code_hash:], flags)
                 if opcode == opcodes.OP_CHECKSIGVERIFY:
-                    if bool_from_script_bytes(stack.pop()):
+                    if not bool_from_script_bytes(stack.pop()):
                         raise ScriptError("VERIFY failed at %d" % (pc-1))
                 continue
old_path: pycoin/tx/script/vm.py
rewrite: ReplaceText(target='not ' @(200,23)->(200,23))
initial_state:
def eval_script(script, signature_for_hash_type_f, lock_time, expected_hash_type
                # Subset of script starting at the most recent codeseparator
                op_checksig(stack, signature_for_hash_type_f, expected_hash_type, script[begin_code_hash:], flags)
                if opcode == opcodes.OP_CHECKSIGVERIFY:
                    if bool_from_script_bytes(stack.pop()):
                        raise ScriptError("VERIFY failed at %d" % (pc-1))
                continue
final_state:
def eval_script(script, signature_for_hash_type_f, lock_time, expected_hash_type
                # Subset of script starting at the most recent codeseparator
                op_checksig(stack, signature_for_hash_type_f, expected_hash_type, script[begin_code_hash:], flags)
                if opcode == opcodes.OP_CHECKSIGVERIFY:
                    if not bool_from_script_bytes(stack.pop()):
                        raise ScriptError("VERIFY failed at %d" % (pc-1))
                continue
136
https://:@github.com/richardkiss/pycoin.git
6d1df60ddb054d1510f38231a529ccf35a73525a
diff:
@@ -302,7 +302,7 @@ def generate_output(args, output_dict, output_order):
     if len(output_order) == 0:
         print("no output: use -j option to see keys")
-    elif len(output_order) == 1:
+    elif len(output_dict) == 1:
         print(output_dict[output_order[0][0]])
     else:
         dump_output(output_dict, output_order)
old_path: pycoin/cmds/ku.py
rewrite: ReplaceText(target='output_dict' @(305,13)->(305,25))
initial_state:
def generate_output(args, output_dict, output_order):
    if len(output_order) == 0:
        print("no output: use -j option to see keys")
    elif len(output_order) == 1:
        print(output_dict[output_order[0][0]])
    else:
        dump_output(output_dict, output_order)
final_state:
def generate_output(args, output_dict, output_order):
    if len(output_order) == 0:
        print("no output: use -j option to see keys")
    elif len(output_dict) == 1:
        print(output_dict[output_order[0][0]])
    else:
        dump_output(output_dict, output_order)
137
https://:@github.com/richardkiss/pycoin.git
02a225ef6056cc5a7fa19a731e14fbda94522c55
diff:
@@ -37,7 +37,7 @@ def deterministic_generate_k(generator_order, secret_exponent, val, hash_f=hashl
     shift = 8 * hash_size - bln
     if shift > 0:
         val >>= shift
-    if val > n:
+    if val >= n:
         val -= n
     h1 = intstream.to_bytes(val, length=order_size)
     k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
old_path: pycoin/ecdsa/rfc6979.py
rewrite: ReplaceText(target='>=' @(40,11)->(40,12))
initial_state:
def deterministic_generate_k(generator_order, secret_exponent, val, hash_f=hashl
    shift = 8 * hash_size - bln
    if shift > 0:
        val >>= shift
    if val > n:
        val -= n
    h1 = intstream.to_bytes(val, length=order_size)
    k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
final_state:
def deterministic_generate_k(generator_order, secret_exponent, val, hash_f=hashl
    shift = 8 * hash_size - bln
    if shift > 0:
        val >>= shift
    if val >= n:
        val -= n
    h1 = intstream.to_bytes(val, length=order_size)
    k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
138
https://:@github.com/er1iang/hfut-stu-lib.git
6bdc0e5591564da7b5fe2acd60bde0cb8b2b46f6
diff:
@@ -60,7 +60,7 @@ class TestUtil(TestBase):
             '172.18.6.98',
             '172.18.6.99'
         ])
-        assert len(r) == 1
+        assert len(r) <= 1
         with pytest.raises(ValueError):
             util.get_host_speed_rank(['qq.com'])
         assert util.get_host_speed_rank(timeout=0) == []
old_path: tests/test_util.py
rewrite: ReplaceText(target='<=' @(63,22)->(63,24))
initial_state:
class TestUtil(TestBase):
            '172.18.6.98',
            '172.18.6.99'
        ])
        assert len(r) == 1
        with pytest.raises(ValueError):
            util.get_host_speed_rank(['qq.com'])
        assert util.get_host_speed_rank(timeout=0) == []
final_state:
class TestUtil(TestBase):
            '172.18.6.98',
            '172.18.6.99'
        ])
        assert len(r) <= 1
        with pytest.raises(ValueError):
            util.get_host_speed_rank(['qq.com'])
        assert util.get_host_speed_rank(timeout=0) == []
139
https://:@github.com/acorg/dark-matter.git
7837baf17ff17925b5a56178358b3ec478635c9b
diff:
@@ -109,7 +109,7 @@ def main():
         consensus.id = args.id
     elif args.idLambda is not None:
         idLambda = eval(args.idLambda)
-        consensus.id = idLambda(args.id)
+        consensus.id = idLambda(consensus.id)
     print(consensus.toString('fasta'), end='')
old_path: bin/make-consensus.py
rewrite: ReplaceText(target='consensus' @(112,32)->(112,36))
initial_state:
def main():
        consensus.id = args.id
    elif args.idLambda is not None:
        idLambda = eval(args.idLambda)
        consensus.id = idLambda(args.id)
    print(consensus.toString('fasta'), end='')
final_state:
def main():
        consensus.id = args.id
    elif args.idLambda is not None:
        idLambda = eval(args.idLambda)
        consensus.id = idLambda(consensus.id)
    print(consensus.toString('fasta'), end='')
140
https://:@github.com/Netflix/security_monkey.git
b6356189f8c9e407e4c017bbbf31d8f32aa004a9
diff:
@@ -122,7 +122,7 @@ class SNSAuditor(Auditor):
             else:
                 arn = ARN(princ_aws)
                 if arn.error:
-                    self.add_issue(3, 'Auditor could not parse ARN', snsitem, notes=entry)
+                    self.add_issue(3, 'Auditor could not parse ARN', snsitem, notes=princ_aws)
                 else:
                     account_numbers.append(arn.account_number)
old_path: security_monkey/auditors/sns.py
rewrite: ReplaceText(target='princ_aws' @(125,88)->(125,93))
initial_state:
class SNSAuditor(Auditor):
            else:
                arn = ARN(princ_aws)
                if arn.error:
                    self.add_issue(3, 'Auditor could not parse ARN', snsitem, notes=entry)
                else:
                    account_numbers.append(arn.account_number)
final_state:
class SNSAuditor(Auditor):
            else:
                arn = ARN(princ_aws)
                if arn.error:
                    self.add_issue(3, 'Auditor could not parse ARN', snsitem, notes=princ_aws)
                else:
                    account_numbers.append(arn.account_number)
141
https://:@github.com/SetBased/py-stratum.git
0a4f1f580810e466e6384395832353a77b8e909f
diff:
@@ -26,7 +26,7 @@ class Connection:
         else:
             return_value = config.get(section, option, fallback=fallback)
-        if fallback is not None and return_value is None:
+        if fallback is None and return_value is None:
             raise KeyError("Option '%s' is not found in section '%s'." % (option, section))
         return return_value
old_path: pystratum/Connection.py
rewrite: ReplaceText(target=' is ' @(29,19)->(29,27))
initial_state:
class Connection:
        else:
            return_value = config.get(section, option, fallback=fallback)
        if fallback is not None and return_value is None:
            raise KeyError("Option '%s' is not found in section '%s'." % (option, section))
        return return_value
final_state:
class Connection:
        else:
            return_value = config.get(section, option, fallback=fallback)
        if fallback is None and return_value is None:
            raise KeyError("Option '%s' is not found in section '%s'." % (option, section))
        return return_value
142
https://:@github.com/awslabs/aws-service-catalog-puppet-framework.git
8ec184735fbc4d8f72484d5baa0e357bbdbeb9d8
diff:
@@ -85,7 +85,7 @@ def test_deploy_launches_task_builder_for_account_launch_region(sut, mocker, sha
     assert len(actual_all_tasks.keys()) == 1
     assert actual_all_tasks == expected_all_tasks
     mocked_get_required_params.assert_called_once_with(
-        region_name, launch_details.get('portfolio'), launch_details.get('product'), launch_details.get('version'), puppet_account_id
+        region_name, launch_details.get('portfolio'), launch_details.get('product'), launch_details.get('version'), account_id
     )
     mocked_get_parameters_for_launch.assert_called_once_with(
         required_parameters,
old_path: servicecatalog_puppet/cli_command_helpers_unit_test.py
rewrite: ReplaceText(target='account_id' @(88,116)->(88,133))
initial_state:
def test_deploy_launches_task_builder_for_account_launch_region(sut, mocker, sha
    assert len(actual_all_tasks.keys()) == 1
    assert actual_all_tasks == expected_all_tasks
    mocked_get_required_params.assert_called_once_with(
        region_name, launch_details.get('portfolio'), launch_details.get('product'), launch_details.get('version'), puppet_account_id
    )
    mocked_get_parameters_for_launch.assert_called_once_with(
        required_parameters,
final_state:
def test_deploy_launches_task_builder_for_account_launch_region(sut, mocker, sha
    assert len(actual_all_tasks.keys()) == 1
    assert actual_all_tasks == expected_all_tasks
    mocked_get_required_params.assert_called_once_with(
        region_name, launch_details.get('portfolio'), launch_details.get('product'), launch_details.get('version'), account_id
    )
    mocked_get_parameters_for_launch.assert_called_once_with(
        required_parameters,
143
https://:@github.com/kivy-garden/graph.git
b6ec8765b231bfbc1cf0c585243eb43379bb8946
diff:
@@ -448,7 +448,7 @@ class Graph(Widget):
             ymin = self.ymin
             ymax = self.ymax
             if ylog:
-                xmin = log10(ymin)
+                ymin = log10(ymin)
                 ymax = log10(ymax)
             if len(xpoints):
                 top = size[3] if self.x_grid else metrics.dp(12) + size[1]
old_path: __init__.py
rewrite: ReplaceText(target='ymin' @(451,12)->(451,16))
initial_state:
class Graph(Widget):
            ymin = self.ymin
            ymax = self.ymax
            if ylog:
                xmin = log10(ymin)
                ymax = log10(ymax)
            if len(xpoints):
                top = size[3] if self.x_grid else metrics.dp(12) + size[1]
final_state:
class Graph(Widget):
            ymin = self.ymin
            ymax = self.ymax
            if ylog:
                ymin = log10(ymin)
                ymax = log10(ymax)
            if len(xpoints):
                top = size[3] if self.x_grid else metrics.dp(12) + size[1]
144
https://:@github.com/irmen/synthesizer.git
e1da70408c0fce248edc4542e2f94e8ea6ea72b9
diff:
@@ -31,7 +31,7 @@ class DecodedSoundFile:
         self.sample_format = sample_format # one of the ma_format_ values
         self.sample_format_name = ffi.string(lib.ma_get_format_name(sample_format)).decode()
         self.samples = samples
-        self.num_frames = len(samples) / self.nchannels
+        self.num_frames = len(samples) // self.nchannels
         self.duration = self.num_frames / self.sample_rate
old_path: pyminiaudio/miniaudio.py
rewrite: ReplaceText(target='//' @(34,39)->(34,40))
initial_state:
class DecodedSoundFile:
        self.sample_format = sample_format # one of the ma_format_ values
        self.sample_format_name = ffi.string(lib.ma_get_format_name(sample_format)).decode()
        self.samples = samples
        self.num_frames = len(samples) / self.nchannels
        self.duration = self.num_frames / self.sample_rate
final_state:
class DecodedSoundFile:
        self.sample_format = sample_format # one of the ma_format_ values
        self.sample_format_name = ffi.string(lib.ma_get_format_name(sample_format)).decode()
        self.samples = samples
        self.num_frames = len(samples) // self.nchannels
        self.duration = self.num_frames / self.sample_rate
145
https://:@github.com/pydata/numexpr.git
d11ef8a9dec059b679e773ad8fa54dd4f462d2e8
diff:
@@ -304,7 +304,7 @@ class test_numexpr(TestCase):
         assert_equal(res, b)
         a = False
         res = evaluate('where(a, b, c)')
-        assert_equal(res, b)
+        assert_equal(res, c)
old_path: numexpr/tests/test_numexpr.py
rewrite: ReplaceText(target='c' @(307,26)->(307,27))
initial_state:
class test_numexpr(TestCase):
        assert_equal(res, b)
        a = False
        res = evaluate('where(a, b, c)')
        assert_equal(res, b)
final_state:
class test_numexpr(TestCase):
        assert_equal(res, b)
        a = False
        res = evaluate('where(a, b, c)')
        assert_equal(res, c)
146
https://:@github.com/aio-libs/aiozipkin.git
73a9594e475a65d11f32ae1ec48d3a324d2cb2b2
diff:
@@ -23,7 +23,7 @@ class Transport:
         self._queue.append(data)
     async def _sender_loop(self):
-        while self._ender.done():
+        while not self._ender.done():
             if len(self._queue) != 0:
                 await self._send()
old_path: aiozipkin/transport.py
rewrite: ReplaceText(target='not ' @(26,14)->(26,14))
initial_state:
class Transport:
        self._queue.append(data)
    async def _sender_loop(self):
        while self._ender.done():
            if len(self._queue) != 0:
                await self._send()
final_state:
class Transport:
        self._queue.append(data)
    async def _sender_loop(self):
        while not self._ender.done():
            if len(self._queue) != 0:
                await self._send()
147
https://:@gitlab.com/deliberist/xdgenvpy.git
cce7b99c90cd770aee31195f98e23169405d31c2
diff:
@@ -96,7 +96,7 @@ def print_vars(xdg, variables):
     :param list variables: A sequence of XDG variables to print.
     """
     for var in variables:
-        if not (str(var).startswith('XDG_') or hasattr(xdg, var)):
+        if not (str(var).startswith('XDG_') and hasattr(xdg, var)):
             LOG.error('Invalid XDG variable: %s', var)
         else:
             value = getattr(xdg, var)
old_path: xdgenvpy/__main__.py
rewrite: ReplaceText(target='and' @(99,44)->(99,46))
initial_state:
def print_vars(xdg, variables):
    :param list variables: A sequence of XDG variables to print.
    """
    for var in variables:
        if not (str(var).startswith('XDG_') or hasattr(xdg, var)):
            LOG.error('Invalid XDG variable: %s', var)
        else:
            value = getattr(xdg, var)
final_state:
def print_vars(xdg, variables):
    :param list variables: A sequence of XDG variables to print.
    """
    for var in variables:
        if not (str(var).startswith('XDG_') and hasattr(xdg, var)):
            LOG.error('Invalid XDG variable: %s', var)
        else:
            value = getattr(xdg, var)
148
https://:@github.com/theislab/anndata.git
908bbbd7c0ad3e61e7db0441fbe9ec93091a0dd5
diff:
@@ -850,7 +850,7 @@ class AnnData(IndexMixin):
                     categories=ddata[k])
             if k_stripped in var:
                 var[k_stripped] = pd.Categorical.from_codes(
-                    codes=smp[k_stripped].values,
+                    codes=var[k_stripped].values,
                     categories=ddata[k])
             k_to_delete.append(k)
old_path: anndata/anndata.py
rewrite: ReplaceText(target='var' @(853,34)->(853,37))
initial_state:
class AnnData(IndexMixin):
                    categories=ddata[k])
            if k_stripped in var:
                var[k_stripped] = pd.Categorical.from_codes(
                    codes=smp[k_stripped].values,
                    categories=ddata[k])
            k_to_delete.append(k)
final_state:
class AnnData(IndexMixin):
                    categories=ddata[k])
            if k_stripped in var:
                var[k_stripped] = pd.Categorical.from_codes(
                    codes=var[k_stripped].values,
                    categories=ddata[k])
            k_to_delete.append(k)
149
https://:@github.com/theislab/anndata.git
0c6ad8700a028675f25e6a769aaf39db3f2b8893
diff:
@@ -104,7 +104,7 @@ def write_loom(
     elif len(adata.obsm.keys()) > 0 or len(adata.varm.keys()) > 0:
         logger.warning(
             f'The loom file will lack these fields:\n'
-            f'{adata.obsm.keys() + adata.varm.keys()}\n'
+            f'{adata.obsm.keys() | adata.varm.keys()}\n'
             f'Use write_obsm_varm=True to export multi-dimensional annotations'
         )
old_path: anndata/readwrite/write.py
rewrite: ReplaceText(target='|' @(107,33)->(107,34))
initial_state:
def write_loom(
    elif len(adata.obsm.keys()) > 0 or len(adata.varm.keys()) > 0:
        logger.warning(
            f'The loom file will lack these fields:\n'
            f'{adata.obsm.keys() + adata.varm.keys()}\n'
            f'Use write_obsm_varm=True to export multi-dimensional annotations'
        )
final_state:
def write_loom(
    elif len(adata.obsm.keys()) > 0 or len(adata.varm.keys()) > 0:
        logger.warning(
            f'The loom file will lack these fields:\n'
            f'{adata.obsm.keys() | adata.varm.keys()}\n'
            f'Use write_obsm_varm=True to export multi-dimensional annotations'
        )
150
https://:@github.com/atarashansky/self-assembling-manifold.git
a8a2172a308d8fb99fc3981e64c1cc358d4d3355
diff:
@@ -965,7 +965,7 @@ class SAMGUI(object):
         else:
             return; #quit
-        markers = ut.find_corr_genes(s,txt).flatten()
+        markers = ut.find_corr_genes(gene,txt).flatten()
         _,i = np.unique(markers,return_index=True)
         markers=markers[np.sort(i)]
         self.marker_genes[self.stab.selected_index] = markers
old_path: SAMGUI.py
rewrite: ReplaceText(target='gene' @(968,37)->(968,38))
initial_state:
class SAMGUI(object):
        else:
            return; #quit
        markers = ut.find_corr_genes(s,txt).flatten()
        _,i = np.unique(markers,return_index=True)
        markers=markers[np.sort(i)]
        self.marker_genes[self.stab.selected_index] = markers
final_state:
class SAMGUI(object):
        else:
            return; #quit
        markers = ut.find_corr_genes(gene,txt).flatten()
        _,i = np.unique(markers,return_index=True)
        markers=markers[np.sort(i)]
        self.marker_genes[self.stab.selected_index] = markers
151
https://:@github.com/atarashansky/self-assembling-manifold.git
6539b0d87edc6dd18bd1bd3709fc41ad2c51b3f3
diff:
@@ -248,7 +248,7 @@ def search_string(vec, s, case_sensitive=False, invert=False):
         i = len(V)
         V = np.concatenate(V); M = np.concatenate(M);
         if i > 1:
-            ix = np.sort(np.unique(V,return_index=True)[1])
+            ix = np.sort(np.unique(M,return_index=True)[1])
             V=V[ix]; M=M[ix];
         return V,M
     else:
old_path: samalg/utilities.py
rewrite: ReplaceText(target='M' @(251,35)->(251,36))
initial_state:
def search_string(vec, s, case_sensitive=False, invert=False):
        i = len(V)
        V = np.concatenate(V); M = np.concatenate(M);
        if i > 1:
            ix = np.sort(np.unique(V,return_index=True)[1])
            V=V[ix]; M=M[ix];
        return V,M
    else:
final_state:
def search_string(vec, s, case_sensitive=False, invert=False):
        i = len(V)
        V = np.concatenate(V); M = np.concatenate(M);
        if i > 1:
            ix = np.sort(np.unique(M,return_index=True)[1])
            V=V[ix]; M=M[ix];
        return V,M
    else:
152
https://:@github.com/den4uk/andriller.git
a5af3433a9caa6f05d8d43aef5abc8f405349f67
diff:
@@ -379,7 +379,7 @@ class MainWindow(BaseWindow):
         self.menubar.add_cascade(menu=menu_help, label='Help', underline=0)
         menu_help.add_command(label='Visit website')
         menu_help.add_separator()
-        if getattr(sys, 'frozen', False):
+        if not getattr(sys, 'frozen', False):
             menu_help.add_command(label='Run Update', command=lambda: self.conf.upgrade_package(logger=self.logger))
             menu_help.add_separator()
         menu_help.add_command(label='About', command=self.about_msg)
old_path: andriller/windows.py
rewrite: ReplaceText(target='not ' @(382,11)->(382,11))
initial_state:
class MainWindow(BaseWindow):
        self.menubar.add_cascade(menu=menu_help, label='Help', underline=0)
        menu_help.add_command(label='Visit website')
        menu_help.add_separator()
        if getattr(sys, 'frozen', False):
            menu_help.add_command(label='Run Update', command=lambda: self.conf.upgrade_package(logger=self.logger))
            menu_help.add_separator()
        menu_help.add_command(label='About', command=self.about_msg)
final_state:
class MainWindow(BaseWindow):
        self.menubar.add_cascade(menu=menu_help, label='Help', underline=0)
        menu_help.add_command(label='Visit website')
        menu_help.add_separator()
        if not getattr(sys, 'frozen', False):
            menu_help.add_command(label='Run Update', command=lambda: self.conf.upgrade_package(logger=self.logger))
            menu_help.add_separator()
        menu_help.add_command(label='About', command=self.about_msg)
153
https://:@github.com/fabiencro/knmt.git
26b7b1ed8dfbc0e82804e86c149926fc96cd8d84
diff:
@@ -699,7 +699,7 @@ def do_eval(config_eval):
         assert len(encdec_list) == 1
         scorer = encdec_list[0].nbest_scorer(src_batch, src_mask)
-        nb_batches = (len(tgt_list) + mb_size - 1) / mb_size
+        nb_batches = (len(tgt_list) + mb_size - 1) // mb_size
         for num_batch in six.moves.range(nb_batches):
             tgt_batch, arg_sort = utils.make_batch_tgt(tgt_list[num_batch * nb_batches: (num_batch + 1) * nb_batches],
                                                        eos_idx=eos_idx, gpu=gpu, volatile="on", need_arg_sort=True)
old_path: nmt_chainer/translation/eval.py
rewrite: ReplaceText(target='//' @(702,55)->(702,56))
initial_state:
def do_eval(config_eval):
        assert len(encdec_list) == 1
        scorer = encdec_list[0].nbest_scorer(src_batch, src_mask)
        nb_batches = (len(tgt_list) + mb_size - 1) / mb_size
        for num_batch in six.moves.range(nb_batches):
            tgt_batch, arg_sort = utils.make_batch_tgt(tgt_list[num_batch * nb_batches: (num_batch + 1) * nb_batches],
                                                       eos_idx=eos_idx, gpu=gpu, volatile="on", need_arg_sort=True)
final_state:
def do_eval(config_eval):
        assert len(encdec_list) == 1
        scorer = encdec_list[0].nbest_scorer(src_batch, src_mask)
        nb_batches = (len(tgt_list) + mb_size - 1) // mb_size
        for num_batch in six.moves.range(nb_batches):
            tgt_batch, arg_sort = utils.make_batch_tgt(tgt_list[num_batch * nb_batches: (num_batch + 1) * nb_batches],
                                                       eos_idx=eos_idx, gpu=gpu, volatile="on", need_arg_sort=True)
154
https://:@github.com/fabiencro/knmt.git
8ca17b7d3b52100a3c36ae7a380efbf0ce42107d
@@ -699,7 +699,7 @@ def do_eval(config_eval): assert len(encdec_list) == 1 scorer = encdec_list[0].nbest_scorer(src_batch, src_mask) - nb_batches = (len(tgt_list) + mb_size - 1) / mb_size + nb_batches = (len(tgt_list) + mb_size - 1) // mb_size for num_batch in six.moves.range(nb_batches): tgt_batch, arg_sort = utils.make_batch_tgt(tgt_list[num_batch * nb_batches: (num_batch + 1) * nb_batches], eos_idx=eos_idx, gpu=gpu, volatile="on", need_arg_sort=True)
nmt_chainer/translation/eval.py
ReplaceText(target='//' @(702,55)->(702,56))
def do_eval(config_eval): assert len(encdec_list) == 1 scorer = encdec_list[0].nbest_scorer(src_batch, src_mask) nb_batches = (len(tgt_list) + mb_size - 1) / mb_size for num_batch in six.moves.range(nb_batches): tgt_batch, arg_sort = utils.make_batch_tgt(tgt_list[num_batch * nb_batches: (num_batch + 1) * nb_batches], eos_idx=eos_idx, gpu=gpu, volatile="on", need_arg_sort=True)
def do_eval(config_eval): assert len(encdec_list) == 1 scorer = encdec_list[0].nbest_scorer(src_batch, src_mask) nb_batches = (len(tgt_list) + mb_size - 1) // mb_size for num_batch in six.moves.range(nb_batches): tgt_batch, arg_sort = utils.make_batch_tgt(tgt_list[num_batch * nb_batches: (num_batch + 1) * nb_batches], eos_idx=eos_idx, gpu=gpu, volatile="on", need_arg_sort=True)
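Records 153 and 154 apply the same Python 3 fix: / always yields a float, so a batch count computed with it cannot feed range(). Floor division // keeps the ceil-division idiom integral. A minimal sketch with made-up sizes (the snippet above strides its slices by nb_batches; mb_size is used here for a self-contained example):

tgt_list = list(range(10))   # stand-in data
mb_size = 3

nb_batches = (len(tgt_list) + mb_size - 1) // mb_size   # 4, an int
assert nb_batches == 4
# (len(tgt_list) + mb_size - 1) / mb_size would give 4.0, and
# range(4.0) raises TypeError under Python 3.
for num_batch in range(nb_batches):
    batch = tgt_list[num_batch * mb_size:(num_batch + 1) * mb_size]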
155
https://:@github.com/fabiencro/knmt.git
b091633d528161ceba3b63c501774627a2c927aa
@@ -1127,7 +1127,7 @@ def build_dataset_one_side_pp(src_fn, src_pp, max_nb_ex=None, make_constraints=N # print(len(sentence_tgt), len(sentence_src)) seq_src = src_pp.convert(sentence_src, stats=stats_src) if make_constraints is not None: - constraints_fn = make_constraints(src, seq_src) + constraints_fn = make_constraints(sentence_src, seq_src) constraints_list.append(constraints_fn) res.append(seq_src) if make_constraints is not None:
nmt_chainer/dataprocessing/processors.py
ReplaceText(target='sentence_src' @(1130,46)->(1130,49))
def build_dataset_one_side_pp(src_fn, src_pp, max_nb_ex=None, make_constraints=N # print(len(sentence_tgt), len(sentence_src)) seq_src = src_pp.convert(sentence_src, stats=stats_src) if make_constraints is not None: constraints_fn = make_constraints(src, seq_src) constraints_list.append(constraints_fn) res.append(seq_src) if make_constraints is not None:
def build_dataset_one_side_pp(src_fn, src_pp, max_nb_ex=None, make_constraints=N # print(len(sentence_tgt), len(sentence_src)) seq_src = src_pp.convert(sentence_src, stats=stats_src) if make_constraints is not None: constraints_fn = make_constraints(sentence_src, seq_src) constraints_list.append(constraints_fn) res.append(seq_src) if make_constraints is not None:
156
https://:@github.com/biocore/emperor.git
dedef9ab8d8578dd9d8d002dfa2dde254f27b133
@@ -100,7 +100,7 @@ def main(): sids_intersection = len(set(zip(*mapping_data)[0]) & set(parsed_coords[0])) # sample ids must be shared between files - if sids_intersection > 0: + if sids_intersection <= 0: option_parser.error('The sample identifiers in the coordinates file ' 'must have at least one match with the data contained in mapping ' 'file. Verify you are using a coordinates file and a mapping file '
scripts/make_emperor.py
ReplaceText(target='<=' @(103,25)->(103,26))
def main(): sids_intersection = len(set(zip(*mapping_data)[0]) & set(parsed_coords[0])) # sample ids must be shared between files if sids_intersection > 0: option_parser.error('The sample identifiers in the coordinates file ' 'must have at least one match with the data contained in mapping ' 'file. Verify you are using a coordinates file and a mapping file '
def main(): sids_intersection = len(set(zip(*mapping_data)[0]) & set(parsed_coords[0])) # sample ids must be shared between files if sids_intersection <= 0: option_parser.error('The sample identifiers in the coordinates file ' 'must have at least one match with the data contained in mapping ' 'file. Verify you are using a coordinates file and a mapping file '
157
https://:@github.com/juanpex/django-model-report.git
0aa6d98497f25d178fe48ed9185b77e8d62e722b
@@ -58,7 +58,7 @@ class ExcelExporter(Exporter): for g, rows in report_rows: if g: - sheet1.write(row_index, 0, u'%s' % x, stylebold) + sheet1.write(row_index, 0, u'%s' % g, stylebold) row_index += 1 for row in list(rows): if row.is_value():
model_report/exporters/excel.py
ReplaceText(target='g' @(61,51)->(61,52))
class ExcelExporter(Exporter): for g, rows in report_rows: if g: sheet1.write(row_index, 0, u'%s' % x, stylebold) row_index += 1 for row in list(rows): if row.is_value():
class ExcelExporter(Exporter): for g, rows in report_rows: if g: sheet1.write(row_index, 0, u'%s' % g, stylebold) row_index += 1 for row in list(rows): if row.is_value():
158
https://:@github.com/hclivess/Bismuth.git
a969c471f3d255e9cd139c100ac942c441c51aa0
@@ -120,7 +120,7 @@ def difficulty(c): time_now = time.time() if time_now > timestamp_last + 300: #if 5 minutes have passed - difficulty2 = percentage(97,diff_block_previous) + difficulty2 = percentage(97,difficulty) else: difficulty2 = difficulty
gui.py
ReplaceText(target='difficulty' @(123,36)->(123,55))
def difficulty(c): time_now = time.time() if time_now > timestamp_last + 300: #if 5 minutes have passed difficulty2 = percentage(97,diff_block_previous) else: difficulty2 = difficulty
def difficulty(c): time_now = time.time() if time_now > timestamp_last + 300: #if 5 minutes have passed difficulty2 = percentage(97,difficulty) else: difficulty2 = difficulty
159
https://:@github.com/hclivess/Bismuth.git
75e47ec06ac2c83df3fbe413086393d265f27f1c
@@ -1097,7 +1097,7 @@ def manager(c, conn): app_log.warning("Only {} connections active, resetting the connection history".format(len(connection_pool))) del tried[:] - if nodes_ban_reset and len(connection_pool) < len(banlist) and int(time.time() - reset_time) > 60*10: #do not reset too often. 10 minutes here + if nodes_ban_reset and len(connection_pool) <= len(banlist) and int(time.time() - reset_time) > 60*10: #do not reset too often. 10 minutes here app_log.warning("Less active connections ({}) than banlist ({}), resetting banlist and tried" .format(len(connection_pool), len(banlist))) del banlist[:] banlist.extend(config.banlist) # reset to config version
node.py
ReplaceText(target='<=' @(1100,52)->(1100,53))
def manager(c, conn): app_log.warning("Only {} connections active, resetting the connection history".format(len(connection_pool))) del tried[:] if nodes_ban_reset and len(connection_pool) < len(banlist) and int(time.time() - reset_time) > 60*10: #do not reset too often. 10 minutes here app_log.warning("Less active connections ({}) than banlist ({}), resetting banlist and tried" .format(len(connection_pool), len(banlist))) del banlist[:] banlist.extend(config.banlist) # reset to config version
def manager(c, conn): app_log.warning("Only {} connections active, resetting the connection history".format(len(connection_pool))) del tried[:] if nodes_ban_reset and len(connection_pool) <= len(banlist) and int(time.time() - reset_time) > 60*10: #do not reset too often. 10 minutes here app_log.warning("Less active connections ({}) than banlist ({}), resetting banlist and tried" .format(len(connection_pool), len(banlist))) del banlist[:] banlist.extend(config.banlist) # reset to config version
160
https://:@github.com/hclivess/Bismuth.git
34d72e31b605ce74fc26bd9dac9cedd0c659d5b0
@@ -204,7 +204,7 @@ class MainHandler(tornado.web.RequestHandler): html.append("<table class='table table-responsive'>") html.append("<tr><th>Statistics for the last 500 blocks</th>") - html.append("<tr><td>Kilobytes transferred: </td><td>{}</td>".format(transferred_total)) + html.append("<tr><td>Kilobytes transferred: </td><td>{}</td>".format(data_total)) html.append("<tr><td>Transactions: </td><td>{}</td>".format(tx_count)) html.append("<tr><td>Transactions per block: </td><td>{}</td>".format(tx_count/500)) html.append("<tr><td>Total BIS transferred </td><td>{}</td>".format(transferred_total))
ledger_explorer.py
ReplaceText(target='data_total' @(207,77)->(207,94))
class MainHandler(tornado.web.RequestHandler): html.append("<table class='table table-responsive'>") html.append("<tr><th>Statistics for the last 500 blocks</th>") html.append("<tr><td>Kilobytes transferred: </td><td>{}</td>".format(transferred_total)) html.append("<tr><td>Transactions: </td><td>{}</td>".format(tx_count)) html.append("<tr><td>Transactions per block: </td><td>{}</td>".format(tx_count/500)) html.append("<tr><td>Total BIS transferred </td><td>{}</td>".format(transferred_total))
class MainHandler(tornado.web.RequestHandler): html.append("<table class='table table-responsive'>") html.append("<tr><th>Statistics for the last 500 blocks</th>") html.append("<tr><td>Kilobytes transferred: </td><td>{}</td>".format(data_total)) html.append("<tr><td>Transactions: </td><td>{}</td>".format(tx_count)) html.append("<tr><td>Transactions per block: </td><td>{}</td>".format(tx_count/500)) html.append("<tr><td>Total BIS transferred </td><td>{}</td>".format(transferred_total))
161
https://:@github.com/hclivess/Bismuth.git
e6dd78e38707d04ea401ef86960e9deb08ea59b7
@@ -520,7 +520,7 @@ def difficulty(c, mode): block_height = int(result[0]) timestamp_before_last = Decimal(c.fetchone()[1]) - if block_height > 427000: #remove code ABOVE after hf + if block_height >= 427000: #remove code ABOVE after hf execute(c, "SELECT * FROM transactions WHERE reward != 0 ORDER BY block_height DESC LIMIT 2") result = c.fetchone() timestamp_last = Decimal(result[1])
node.py
ReplaceText(target='>=' @(523,20)->(523,21))
def difficulty(c, mode): block_height = int(result[0]) timestamp_before_last = Decimal(c.fetchone()[1]) if block_height > 427000: #remove code ABOVE after hf execute(c, "SELECT * FROM transactions WHERE reward != 0 ORDER BY block_height DESC LIMIT 2") result = c.fetchone() timestamp_last = Decimal(result[1])
def difficulty(c, mode): block_height = int(result[0]) timestamp_before_last = Decimal(c.fetchone()[1]) if block_height >= 427000: #remove code ABOVE after hf execute(c, "SELECT * FROM transactions WHERE reward != 0 ORDER BY block_height DESC LIMIT 2") result = c.fetchone() timestamp_last = Decimal(result[1])
162
https://:@github.com/hclivess/Bismuth.git
01139c3f9457e2c4dbe7e143c47aa8475f7433cc
@@ -9,7 +9,7 @@ def keys_load(privkey_file, pubkey_file): pubkey_loaded = open(pubkey_file, 'rb').read() pubkey = VerifyingKey.from_string(pubkey_loaded, curve=SECP256k1) - address = blake2b(privkey.to_string(), digest_size=20).hexdigest() + address = blake2b(pubkey.to_string(), digest_size=20).hexdigest() return privkey, pubkey, address
bisecdsa.py
ReplaceText(target='pubkey' @(12,22)->(12,29))
def keys_load(privkey_file, pubkey_file): pubkey_loaded = open(pubkey_file, 'rb').read() pubkey = VerifyingKey.from_string(pubkey_loaded, curve=SECP256k1) address = blake2b(privkey.to_string(), digest_size=20).hexdigest() return privkey, pubkey, address
def keys_load(privkey_file, pubkey_file): pubkey_loaded = open(pubkey_file, 'rb').read() pubkey = VerifyingKey.from_string(pubkey_loaded, curve=SECP256k1) address = blake2b(pubkey.to_string(), digest_size=20).hexdigest() return privkey, pubkey, address
163
https://:@github.com/hclivess/Bismuth.git
b4f1e59d63db962b8f674dc2f4e6e0df34e8404d
@@ -9,7 +9,7 @@ def keys_load(privkey_file, pubkey_file): pubkey_loaded = open(pubkey_file, 'rb').read() pubkey = VerifyingKey.from_string(pubkey_loaded, curve=SECP256k1) - address = blake2b(pubkey.to_string(), digest_size=20).hexdigest() + address = blake2b(privkey.to_string(), digest_size=20).hexdigest() return privkey, pubkey, address
bisecdsa.py
ReplaceText(target='privkey' @(12,22)->(12,28))
def keys_load(privkey_file, pubkey_file): pubkey_loaded = open(pubkey_file, 'rb').read() pubkey = VerifyingKey.from_string(pubkey_loaded, curve=SECP256k1) address = blake2b(pubkey.to_string(), digest_size=20).hexdigest() return privkey, pubkey, address
def keys_load(privkey_file, pubkey_file): pubkey_loaded = open(pubkey_file, 'rb').read() pubkey = VerifyingKey.from_string(pubkey_loaded, curve=SECP256k1) address = blake2b(privkey.to_string(), digest_size=20).hexdigest() return privkey, pubkey, address
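Records 162 and 163 are mirror images: the first derives the address from the public key, the second reverts to the private key. Both derivations in one sketch, using hashlib and the ecdsa package with a freshly generated key rather than Bismuth's actual key files:

from hashlib import blake2b
from ecdsa import SigningKey, SECP256k1

privkey = SigningKey.generate(curve=SECP256k1)
pubkey = privkey.get_verifying_key()

addr_from_pub = blake2b(pubkey.to_string(), digest_size=20).hexdigest()
addr_from_priv = blake2b(privkey.to_string(), digest_size=20).hexdigest()
# The two hashes differ, so which one is "the address" is a protocol
# decision; the back-to-back commits show the project switching between them.
assert addr_from_pub != addr_from_priv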
164
https://:@github.com/hclivess/Bismuth.git
081200edc97ae9d5e35156d634037844835c7205
@@ -1192,7 +1192,7 @@ def digest_block(data, sdef, peer_ip, conn, c, hdd, h, hdd2, h2, h3, index, inde # if (q_time_now < q_received_timestamp + 432000) and not quicksync: # balance_pre = quantize_eight(credit_ledger - debit_ledger - fees + rewards) # without projection - balance_pre = ledger_balance3(db_address, h2, balances) + balance_pre = ledger_balance3(db_address, c, balances) # balance = quantize_eight(credit - debit - fees + rewards) balance = quantize_eight(balance_pre - block_debit_address) # app_log.info("Digest: Projected transaction address balance: " + str(balance))
node.py
ReplaceText(target='c' @(1195,62)->(1195,64))
def digest_block(data, sdef, peer_ip, conn, c, hdd, h, hdd2, h2, h3, index, inde # if (q_time_now < q_received_timestamp + 432000) and not quicksync: # balance_pre = quantize_eight(credit_ledger - debit_ledger - fees + rewards) # without projection balance_pre = ledger_balance3(db_address, h2, balances) # balance = quantize_eight(credit - debit - fees + rewards) balance = quantize_eight(balance_pre - block_debit_address) # app_log.info("Digest: Projected transaction address balance: " + str(balance))
def digest_block(data, sdef, peer_ip, conn, c, hdd, h, hdd2, h2, h3, index, inde # if (q_time_now < q_received_timestamp + 432000) and not quicksync: # balance_pre = quantize_eight(credit_ledger - debit_ledger - fees + rewards) # without projection balance_pre = ledger_balance3(db_address, c, balances) # balance = quantize_eight(credit - debit - fees + rewards) balance = quantize_eight(balance_pre - block_debit_address) # app_log.info("Digest: Projected transaction address balance: " + str(balance))
165
https://:@github.com/RDCH106/parallel_foreach_submodule.git
714ddc620894fbc6fefac093fe9323a9429f85a8
@@ -23,7 +23,7 @@ class PFSProcess(object): if self.__output_filter == "": self.__output += self.__p.communicate()[0].decode('utf-8') # stdoutdata else: - if str(self.__p.communicate()[0].decode('utf-8')).find(self.__output_filter) != -1: + if str(self.__p.communicate()[0].decode('utf-8')).find(self.__output_filter) == -1: self.__output += self.__p.communicate()[0].decode('utf-8') if self.__p.communicate()[1]: # stderrdata
parallelforeachsubmodule/process.py
ReplaceText(target='==' @(26,89)->(26,91))
class PFSProcess(object): if self.__output_filter == "": self.__output += self.__p.communicate()[0].decode('utf-8') # stdoutdata else: if str(self.__p.communicate()[0].decode('utf-8')).find(self.__output_filter) != -1: self.__output += self.__p.communicate()[0].decode('utf-8') if self.__p.communicate()[1]: # stderrdata
class PFSProcess(object): if self.__output_filter == "": self.__output += self.__p.communicate()[0].decode('utf-8') # stdoutdata else: if str(self.__p.communicate()[0].decode('utf-8')).find(self.__output_filter) == -1: self.__output += self.__p.communicate()[0].decode('utf-8') if self.__p.communicate()[1]: # stderrdata
166
https://:@github.com/rsokl/noggin.git
2f9ec33b6807cf8ff00169a284a2e1c3a77db137
@@ -71,7 +71,7 @@ class LivePlot(LiveLogger): if ( not isinstance(size, Sequence) or len(size) != 2 - or not all(isinstance(x, Real) and x >= 0 for x in size) + or not all(isinstance(x, Real) and x > 0 for x in size) ): raise ValueError( f"`size` must be a length-2 sequence of "
src/liveplot/plotter.py
ReplaceText(target='>' @(74,49)->(74,51))
class LivePlot(LiveLogger): if ( not isinstance(size, Sequence) or len(size) != 2 or not all(isinstance(x, Real) and x >= 0 for x in size) ): raise ValueError( f"`size` must be a length-2 sequence of "
class LivePlot(LiveLogger): if ( not isinstance(size, Sequence) or len(size) != 2 or not all(isinstance(x, Real) and x > 0 for x in size) ): raise ValueError( f"`size` must be a length-2 sequence of "
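Record 166 tightens the figure-size check from >= 0 to > 0: a zero width or height is not a drawable size. A simplified sketch of the predicate (a plain list/tuple check stands in for the original's Sequence test):

from numbers import Real

def valid_size(size):
    return (isinstance(size, (list, tuple)) and len(size) == 2
            and all(isinstance(x, Real) and x > 0 for x in size))

assert valid_size((8, 6))
assert not valid_size((0, 6))   # rejected only after the fix
assert not valid_size((8,))    # wrong length, rejected either way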
167
https://:@github.com/ome/ome-model.git
99bf44c7c5f8661d4c073b98dcafa980abd44920
@@ -366,5 +366,5 @@ class OMEModel(object): substitutionGroupName = self.opts.lang.substitutionGroup(element.getName()) self.substitutionElement_map[substitutionGroupName] = element continue - if len(self.opts.lang.getSubstitutionTypes()) >= 0: + if len(self.opts.lang.getSubstitutionTypes()) > 0: config.METADATA_OBJECT_IGNORE.remove('BinData')
components/xsd-fu/python/ome/modeltools/model.py
ReplaceText(target='>' @(369,54)->(369,56))
class OMEModel(object): substitutionGroupName = self.opts.lang.substitutionGroup(element.getName()) self.substitutionElement_map[substitutionGroupName] = element continue if len(self.opts.lang.getSubstitutionTypes()) >= 0: config.METADATA_OBJECT_IGNORE.remove('BinData')
class OMEModel(object): substitutionGroupName = self.opts.lang.substitutionGroup(element.getName()) self.substitutionElement_map[substitutionGroupName] = element continue if len(self.opts.lang.getSubstitutionTypes()) > 0: config.METADATA_OBJECT_IGNORE.remove('BinData')
168
https://:@github.com/ome/ome-model.git
49d212302bd30541e55ef64409b14377b069cd3a
@@ -135,7 +135,7 @@ class Image(object): assert (len(self.data["Channels"]) <= sizeC), str(self.data) channel_samples = sum([int(x.data['SamplesPerPixel']) for x in self.data["Channels"]]) - assert channel_samples < sizeC, str(self.data) + assert channel_samples <= sizeC, str(self.data) return self.data
ome_model/experimental.py
ReplaceText(target='<=' @(138,31)->(138,32))
class Image(object): assert (len(self.data["Channels"]) <= sizeC), str(self.data) channel_samples = sum([int(x.data['SamplesPerPixel']) for x in self.data["Channels"]]) assert channel_samples < sizeC, str(self.data) return self.data
class Image(object): assert (len(self.data["Channels"]) <= sizeC), str(self.data) channel_samples = sum([int(x.data['SamplesPerPixel']) for x in self.data["Channels"]]) assert channel_samples <= sizeC, str(self.data) return self.data
169
https://:@github.com/uber/causalml.git
3c085167f8fbf6a07ef53ad84f36682e015ff320
@@ -966,7 +966,7 @@ class UpliftTreeClassifier: rightNodeSummary = self.tree_node_summary(w_r, y_r, min_samples_treatment=min_samples_treatment, n_reg=n_reg, - parentNodeSummary=parentNodeSummary) + parentNodeSummary=currentNodeSummary) # check the split validity on min_samples_treatment if set(leftNodeSummary.keys()) != set(rightNodeSummary.keys()):
causalml/inference/tree/models.py
ReplaceText(target='currentNodeSummary' @(969,76)->(969,93))
class UpliftTreeClassifier: rightNodeSummary = self.tree_node_summary(w_r, y_r, min_samples_treatment=min_samples_treatment, n_reg=n_reg, parentNodeSummary=parentNodeSummary) # check the split validity on min_samples_treatment if set(leftNodeSummary.keys()) != set(rightNodeSummary.keys()):
class UpliftTreeClassifier: rightNodeSummary = self.tree_node_summary(w_r, y_r, min_samples_treatment=min_samples_treatment, n_reg=n_reg, parentNodeSummary=currentNodeSummary) # check the split validity on min_samples_treatment if set(leftNodeSummary.keys()) != set(rightNodeSummary.keys()):
170
https://:@github.com/pypa/setuptools_scm.git
3e2ee4c2c77900f2d20241f489a670f7cb512e98
@@ -77,7 +77,7 @@ def test_version_from_hg_id(tmpdir, get_log_version): hg('add test.txt', cwd) hg('commit -m commit -u test -d "0 0"', cwd) - after_first_commit = get_log_version(tmpdir) + after_first_commit = get_log_version(cwd) assert after_first_commit.startswith('0.0.post1-')
test_hgdistver.py
ReplaceText(target='cwd' @(80,41)->(80,47))
def test_version_from_hg_id(tmpdir, get_log_version): hg('add test.txt', cwd) hg('commit -m commit -u test -d "0 0"', cwd) after_first_commit = get_log_version(tmpdir) assert after_first_commit.startswith('0.0.post1-')
def test_version_from_hg_id(tmpdir, get_log_version): hg('add test.txt', cwd) hg('commit -m commit -u test -d "0 0"', cwd) after_first_commit = get_log_version(cwd) assert after_first_commit.startswith('0.0.post1-')
171
https://:@github.com/pypa/setuptools_scm.git
340b2356e8ab2e6525ef1a07d17155db2788ed50
@@ -50,6 +50,6 @@ def scm_find_files(path, scm_files, scm_dirs): # dirpath + filename with symlinks preserved fullfilename = os.path.join(dirpath, filename) if os.path.normcase(os.path.realpath(fullfilename)) in scm_files: - res.append(os.path.join(path, os.path.relpath(fullfilename, path))) + res.append(os.path.join(path, os.path.relpath(fullfilename, realpath))) seen.add(realdirpath) return res
src/setuptools_scm/file_finder.py
ReplaceText(target='realpath' @(53,76)->(53,80))
def scm_find_files(path, scm_files, scm_dirs): # dirpath + filename with symlinks preserved fullfilename = os.path.join(dirpath, filename) if os.path.normcase(os.path.realpath(fullfilename)) in scm_files: res.append(os.path.join(path, os.path.relpath(fullfilename, path))) seen.add(realdirpath) return res
def scm_find_files(path, scm_files, scm_dirs): # dirpath + filename with symlinks preserved fullfilename = os.path.join(dirpath, filename) if os.path.normcase(os.path.realpath(fullfilename)) in scm_files: res.append(os.path.join(path, os.path.relpath(fullfilename, realpath))) seen.add(realdirpath) return res
172
https://:@github.com/alphaomega-technology/Equation.git
66e92f5b6ab584b7e7ac3bb7c328ff4ea410f88e
@@ -600,7 +600,7 @@ class Expression( object ): continue fs = self.__getfunction(op) while True: - if (fn['prec'] <= fs['prec']): + if (fn['prec'] >= fs['prec']): self.__expr.append(ExpressionFunction(fs['func'],fs['args'],fs['str'],fs['latex'],op[0],False)) if len(stack) == 0: stack.append(v)
Equation/core.py
ReplaceText(target='>=' @(603,35)->(603,37))
class Expression( object ): continue fs = self.__getfunction(op) while True: if (fn['prec'] <= fs['prec']): self.__expr.append(ExpressionFunction(fs['func'],fs['args'],fs['str'],fs['latex'],op[0],False)) if len(stack) == 0: stack.append(v)
class Expression( object ): continue fs = self.__getfunction(op) while True: if (fn['prec'] >= fs['prec']): self.__expr.append(ExpressionFunction(fs['func'],fs['args'],fs['str'],fs['latex'],op[0],False)) if len(stack) == 0: stack.append(v)
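Record 172 flips a precedence comparison in Equation's infix parser. In a shunting-yard parser, a left-associative operator must pop the stack while the operator already on the stack binds at least as tightly; getting the direction wrong silently re-associates expressions. A generic sketch of the standard rule, with a hypothetical precedence table rather than Equation's internals:

PREC = {'+': 1, '-': 1, '*': 2, '/': 2}

def to_rpn(tokens):
    out, stack = [], []
    for tok in tokens:
        if tok in PREC:
            while stack and PREC[stack[-1]] >= PREC[tok]:
                out.append(stack.pop())   # stacked op binds at least as tight
            stack.append(tok)
        else:
            out.append(tok)
    out.extend(reversed(stack))
    return out

# '1 - 2 + 3' must associate left: '-' is emitted before '+'.
assert to_rpn(['1', '-', '2', '+', '3']) == ['1', '2', '-', '3', '+']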
173
https://:@github.com/Azure/azure-uamqp-python.git
29706fc2599f09f186f85fd15bf243b5ed60477f
@@ -499,7 +499,7 @@ class SendClient(AMQPClient): message.state = constants.MessageState.SendComplete message._response = errors.MessageAlreadySettled() # pylint: disable=protected-access if message.on_send_complete: - message.on_send_complete(result, delivery_state) + message.on_send_complete(result, exception) def _filter_pending(self, message): if message.state in constants.DONE_STATES:
uamqp/client.py
ReplaceText(target='exception' @(502,45)->(502,59))
class SendClient(AMQPClient): message.state = constants.MessageState.SendComplete message._response = errors.MessageAlreadySettled() # pylint: disable=protected-access if message.on_send_complete: message.on_send_complete(result, delivery_state) def _filter_pending(self, message): if message.state in constants.DONE_STATES:
class SendClient(AMQPClient): message.state = constants.MessageState.SendComplete message._response = errors.MessageAlreadySettled() # pylint: disable=protected-access if message.on_send_complete: message.on_send_complete(result, exception) def _filter_pending(self, message): if message.state in constants.DONE_STATES:
174
https://:@github.com/MITHaystack/digital_rf.git
fe9ab29c4bc9584474f264516130c1c92b43e0d3
@@ -418,7 +418,7 @@ class Thor(object): # set master clock rate clock_rate = op.clock_rates[mb_num] if clock_rate is not None: - op.set_clock_rate(clock_rate, mb_num) + u.set_clock_rate(clock_rate, mb_num) op.clock_rates[mb_num] = u.get_clock_rate(mb_num) # set clock source
python/tools/thor.py
ReplaceText(target='u' @(421,16)->(421,18))
class Thor(object): # set master clock rate clock_rate = op.clock_rates[mb_num] if clock_rate is not None: op.set_clock_rate(clock_rate, mb_num) op.clock_rates[mb_num] = u.get_clock_rate(mb_num) # set clock source
class Thor(object): # set master clock rate clock_rate = op.clock_rates[mb_num] if clock_rate is not None: u.set_clock_rate(clock_rate, mb_num) op.clock_rates[mb_num] = u.get_clock_rate(mb_num) # set clock source
175
https://:@github.com/rm-hull/luma.core.git
dca2765dc5f02941a5f5668ed65f60650a95d929
@@ -40,7 +40,7 @@ def show_message(device, msg, y_offset=0, fill=None, font=None, scroll_delay=0.0 text(draw, (x, y_offset), msg, font=font, fill=fill) i = 0 - while i < w + x: + while i <= w + x: virtual.set_position((i, 0)) regulator.sleep() i += 1
luma/core/legacy/__init__.py
ReplaceText(target='<=' @(43,12)->(43,13))
def show_message(device, msg, y_offset=0, fill=None, font=None, scroll_delay=0.0 text(draw, (x, y_offset), msg, font=font, fill=fill) i = 0 while i < w + x: virtual.set_position((i, 0)) regulator.sleep() i += 1
def show_message(device, msg, y_offset=0, fill=None, font=None, scroll_delay=0.0 text(draw, (x, y_offset), msg, font=font, fill=fill) i = 0 while i <= w + x: virtual.set_position((i, 0)) regulator.sleep() i += 1
176
https://:@github.com/gforcada/flake8-builtins.git
da932110850fae82bdc56cb2e5b5fed2ff228e3c
@@ -211,7 +211,7 @@ class BuiltinsChecker(object): if not message: message = self.assign_msg if not variable: - column = statement.id + variable = statement.id if not line: line = statement.lineno if not column:
flake8_builtins.py
ReplaceText(target='variable' @(214,12)->(214,18))
class BuiltinsChecker(object): if not message: message = self.assign_msg if not variable: column = statement.id if not line: line = statement.lineno if not column:
class BuiltinsChecker(object): if not message: message = self.assign_msg if not variable: variable = statement.id if not line: line = statement.lineno if not column:
177
https://:@github.com/European-XFEL/h5glance.git
23bcd02f8a36c9fd1f623e6627a7d6960669e06c
@@ -157,7 +157,7 @@ class TreeViewBuilder: if obj.id.get_create_plist().get_layout() == h5py.h5d.VIRTUAL: detail += ' virtual' elif isinstance(obj, h5py.Group): - if max_depth > 1: + if max_depth >= 1: children += [self.group_item_node(obj, key, max_depth - 1) for key in obj] else:
h5glance/terminal.py
ReplaceText(target='>=' @(160,25)->(160,26))
class TreeViewBuilder: if obj.id.get_create_plist().get_layout() == h5py.h5d.VIRTUAL: detail += ' virtual' elif isinstance(obj, h5py.Group): if max_depth > 1: children += [self.group_item_node(obj, key, max_depth - 1) for key in obj] else:
class TreeViewBuilder: if obj.id.get_create_plist().get_layout() == h5py.h5d.VIRTUAL: detail += ' virtual' elif isinstance(obj, h5py.Group): if max_depth >= 1: children += [self.group_item_node(obj, key, max_depth - 1) for key in obj] else:
178
https://:@github.com/camptocamp/c2cgeoform.git
98126ec7859b1bc5b1b2b720fea1c5d5ca9bbbef
@@ -224,7 +224,7 @@ class AbstractViews(): if field.id() == sort: criterion = field.sort_column() if order == 'desc': - criterion = desc(sort) + criterion = desc(criterion) criteria.append(criterion) # Sort on primary key as subqueryload with limit need deterministic order
c2cgeoform/views/abstract_views.py
ReplaceText(target='criterion' @(227,37)->(227,41))
class AbstractViews(): if field.id() == sort: criterion = field.sort_column() if order == 'desc': criterion = desc(sort) criteria.append(criterion) # Sort on primary key as subqueryload with limit need deterministic order
class AbstractViews(): if field.id() == sort: criterion = field.sort_column() if order == 'desc': criterion = desc(criterion) criteria.append(criterion) # Sort on primary key as subqueryload with limit need deterministic order
179
https://:@github.com/HumanCellAtlas/data-store.git
a28a6a38433fa3ead01ba5bd7e9289caf9c905b0
@@ -114,7 +114,7 @@ def _verify_checkout( ) -> typing.Tuple[str, bool]: decoded_token: dict if token is None: - execution_id = start_file_checkout(blob_path, replica) + execution_id = start_file_checkout(replica, blob_path) start_time = time.time() attempts = 0
dss/api/files.py
ArgSwap(idxs=0<->1 @(117,23)->(117,42))
def _verify_checkout( ) -> typing.Tuple[str, bool]: decoded_token: dict if token is None: execution_id = start_file_checkout(blob_path, replica) start_time = time.time() attempts = 0
def _verify_checkout( ) -> typing.Tuple[str, bool]: decoded_token: dict if token is None: execution_id = start_file_checkout(replica, blob_path) start_time = time.time() attempts = 0
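Record 179 is an ArgSwap: start_file_checkout takes (replica, blob_path) but was called with the arguments reversed, which nothing catches when both are strings. Keyword arguments make the call order-proof; the function body below is hypothetical:

def start_file_checkout(replica, blob_path):
    return "checkout {} from {}".format(blob_path, replica)

start_file_checkout("blobs/abc", "aws")                    # swapped, no error
start_file_checkout(replica="aws", blob_path="blobs/abc")  # intent explicit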
180
https://:@github.com/HumanCellAtlas/data-store.git
6ab718c4aef36abe12b10556e27d5943176f7314
@@ -53,7 +53,7 @@ class ElasticsearchIndexBackend(IndexBackend): tombstone_doc = BundleTombstoneDocument.from_tombstone(tombstone) modified, index_name = doc.entomb(tombstone_doc, dryrun=self.dryrun) if self.notify or modified and self.notify is None: - self._notify(doc, index_name) + self._notify(tombstone_doc, index_name) def _notify(self, bundle, index_name): subscription_ids = self._find_matching_subscriptions(bundle, index_name)
dss/index/es/backend.py
ReplaceText(target='tombstone_doc' @(56,25)->(56,28))
class ElasticsearchIndexBackend(IndexBackend): tombstone_doc = BundleTombstoneDocument.from_tombstone(tombstone) modified, index_name = doc.entomb(tombstone_doc, dryrun=self.dryrun) if self.notify or modified and self.notify is None: self._notify(doc, index_name) def _notify(self, bundle, index_name): subscription_ids = self._find_matching_subscriptions(bundle, index_name)
class ElasticsearchIndexBackend(IndexBackend): tombstone_doc = BundleTombstoneDocument.from_tombstone(tombstone) modified, index_name = doc.entomb(tombstone_doc, dryrun=self.dryrun) if self.notify or modified and self.notify is None: self._notify(tombstone_doc, index_name) def _notify(self, bundle, index_name): subscription_ids = self._find_matching_subscriptions(bundle, index_name)
181
https://:@github.com/oemof/tespy.git
b6c36317886f435ae4dda9a8459788fabbfe85a8
@@ -330,7 +330,7 @@ class bus: 'This bus accepts components of type ' + str(type(c).__bases__[0]) + '.') raise TypeError(msg) - return False + return True return True
tespy/connections.py
ReplaceText(target='True' @(333,23)->(333,28))
class bus: 'This bus accepts components of type ' + str(type(c).__bases__[0]) + '.') raise TypeError(msg) return False return True
class bus: 'This bus accepts components of type ' + str(type(c).__bases__[0]) + '.') raise TypeError(msg) return True return True
182
https://:@github.com/oemof/tespy.git
d69bd568bde4209be5aff37328ca422171ce3467
@@ -1318,7 +1318,7 @@ class separator(node): res = x * self.inl[0].m.val_SI for o in self.outl: res -= o.fluid.val[fluid] * o.m.val_SI - self.vec_res[k] += res + self.vec_res[k] = res k += 1 ######################################################################
tespy/components/nodes.py
ReplaceText(target='=' @(1321,28)->(1321,30))
class separator(node): res = x * self.inl[0].m.val_SI for o in self.outl: res -= o.fluid.val[fluid] * o.m.val_SI self.vec_res[k] += res k += 1 ######################################################################
class separator(node): res = x * self.inl[0].m.val_SI for o in self.outl: res -= o.fluid.val[fluid] * o.m.val_SI self.vec_res[k] = res k += 1 ######################################################################
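Record 182 replaces an accumulating += with plain assignment: the residual for slot k is recomputed from scratch on every solver pass, so adding it onto the previous pass's value compounds stale residuals. A compressed sketch:

vec_res = [0.0]
for _ in range(2):       # two solver passes over the same state
    res = 0.5            # freshly computed residual for this pass
    vec_res[0] = res     # fixed; '+=' would leave 1.0 after pass two
assert vec_res[0] == 0.5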
183
https://:@github.com/uber/tchannel-python.git
867364eea67d8da34f5f84a2d9fa02203f02aa95
@@ -222,7 +222,7 @@ class TChannelClientOperation(object): message = CallRequestMessage( service=self.service, - args=[safebytes(arg_1), arg_3, arg_3], + args=[safebytes(arg_1), arg_2, arg_3], ) response_future = peer_connection.send(message, message_id)
tchannel/tornado/tchannel.py
ReplaceText(target='arg_2' @(225,36)->(225,41))
class TChannelClientOperation(object): message = CallRequestMessage( service=self.service, args=[safebytes(arg_1), arg_3, arg_3], ) response_future = peer_connection.send(message, message_id)
class TChannelClientOperation(object): message = CallRequestMessage( service=self.service, args=[safebytes(arg_1), arg_2, arg_3], ) response_future = peer_connection.send(message, message_id)
184
https://:@github.com/arrrlo/Google-Images-Search.git
26df6441928bc8d69224fe7bf5fc52741a3404a7
@@ -109,7 +109,7 @@ class FetchResizeSave(object): for i, page in enumerate(range(start, end, IMAGES_NUM_LIMIT)): start = page+1 - if self._number_of_images > IMAGES_NUM_LIMIT*(i+1): + if self._number_of_images >= IMAGES_NUM_LIMIT*(i+1): num = IMAGES_NUM_LIMIT else: num = (self._number_of_images % IMAGES_NUM_LIMIT) or \
google_images_search/fetch_resize_save.py
ReplaceText(target='>=' @(112,38)->(112,39))
class FetchResizeSave(object): for i, page in enumerate(range(start, end, IMAGES_NUM_LIMIT)): start = page+1 if self._number_of_images > IMAGES_NUM_LIMIT*(i+1): num = IMAGES_NUM_LIMIT else: num = (self._number_of_images % IMAGES_NUM_LIMIT) or \
class FetchResizeSave(object): for i, page in enumerate(range(start, end, IMAGES_NUM_LIMIT)): start = page+1 if self._number_of_images >= IMAGES_NUM_LIMIT*(i+1): num = IMAGES_NUM_LIMIT else: num = (self._number_of_images % IMAGES_NUM_LIMIT) or \
185
https://:@github.com/Parsl/parsl.git
794ea182f61a9626a84aa58be11952c8bb148ccd
@@ -278,7 +278,7 @@ class EC2Provider(ExecutionProvider): try: with open(credfile, 'r') as f: - creds = json.load(credfile) + creds = json.load(f) except json.JSONDecodeError as e: logger.error( "Site[{0}]: Json decode error in credential file {1}".format(self, credfile)
libsubmit/providers/aws/aws.py
ReplaceText(target='f' @(281,38)->(281,46))
class EC2Provider(ExecutionProvider): try: with open(credfile, 'r') as f: creds = json.load(credfile) except json.JSONDecodeError as e: logger.error( "Site[{0}]: Json decode error in credential file {1}".format(self, credfile)
class EC2Provider(ExecutionProvider): try: with open(credfile, 'r') as f: creds = json.load(f) except json.JSONDecodeError as e: logger.error( "Site[{0}]: Json decode error in credential file {1}".format(self, credfile)
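Record 185 passes the file name to json.load, which expects a file object with a .read() method. A minimal reproduction, with a made-up credentials payload:

import json, tempfile, os

fd, credfile = tempfile.mkstemp(suffix=".json")
with os.fdopen(fd, "w") as f:
    json.dump({"AWSAccessKeyId": "dummy"}, f)

with open(credfile, "r") as f:
    creds = json.load(f)   # json.load(credfile) raises AttributeError:
                           # 'str' object has no attribute 'read'
assert creds["AWSAccessKeyId"] == "dummy"
os.remove(credfile)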
186
https://:@github.com/Parsl/parsl.git
547ef33559003eddb8206bb01d9bbc22bc07aba7
@@ -83,7 +83,7 @@ def update_config(config, rundir): "maxThreads": 8 } } - config["sites"].append(data_manager_site) + config_base["sites"].append(data_manager_site) # Update the config datastructure _config = copy.deepcopy(config)
parsl/dataflow/config_defaults.py
ReplaceText(target='config_base' @(86,4)->(86,10))
def update_config(config, rundir): "maxThreads": 8 } } config["sites"].append(data_manager_site) # Update the config datastructure _config = copy.deepcopy(config)
def update_config(config, rundir): "maxThreads": 8 } } config_base["sites"].append(data_manager_site) # Update the config datastructure _config = copy.deepcopy(config)
187
https://:@github.com/Parsl/parsl.git
be25fe238b3269947cd6c882dfba88a147304937
@@ -76,6 +76,6 @@ class PythonApp(AppBase): fn_hash=self.func_hash, cache=self.cache, ignore_for_cache=self.ignore_for_cache, - app_kwargs=kwargs) + app_kwargs=invocation_kwargs) return app_fut
parsl/app/python.py
ReplaceText(target='invocation_kwargs' @(79,40)->(79,46))
class PythonApp(AppBase): fn_hash=self.func_hash, cache=self.cache, ignore_for_cache=self.ignore_for_cache, app_kwargs=kwargs) return app_fut
class PythonApp(AppBase): fn_hash=self.func_hash, cache=self.cache, ignore_for_cache=self.ignore_for_cache, app_kwargs=invocation_kwargs) return app_fut
188
https://:@github.com/qutang/padar.git
afb839dec2306e76355a03bc5b5602838e1a9201
@@ -63,7 +63,7 @@ class OrientationFeatureComputer(SensorProcessor): ] windows = mw.get_sliding_window_boundaries(start_time=st, stop_time=et, window_duration=ws, step_size=ss) - chunk_windows_mask = (windows[:,0] >= data_start_indicator) & (windows[:,0] <= data_stop_indicator) + chunk_windows_mask = (windows[:,0] >= data_start_indicator) & (windows[:,0] < data_stop_indicator) chunk_windows = windows[chunk_windows_mask,:] if len(chunk_windows) == 0: return pd.DataFrame()
mhealth/scripts/OrientationFeatureComputer.py
ReplaceText(target='<' @(66,84)->(66,86))
class OrientationFeatureComputer(SensorProcessor): ] windows = mw.get_sliding_window_boundaries(start_time=st, stop_time=et, window_duration=ws, step_size=ss) chunk_windows_mask = (windows[:,0] >= data_start_indicator) & (windows[:,0] <= data_stop_indicator) chunk_windows = windows[chunk_windows_mask,:] if len(chunk_windows) == 0: return pd.DataFrame()
class OrientationFeatureComputer(SensorProcessor): ] windows = mw.get_sliding_window_boundaries(start_time=st, stop_time=et, window_duration=ws, step_size=ss) chunk_windows_mask = (windows[:,0] >= data_start_indicator) & (windows[:,0] < data_stop_indicator) chunk_windows = windows[chunk_windows_mask,:] if len(chunk_windows) == 0: return pd.DataFrame()
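Record 188 makes the chunk's window mask half-open: a window starting exactly at data_stop_indicator belongs to the next chunk, and <= would hand it to both. Sketch with made-up boundaries:

import numpy as np

windows = np.array([[0, 10], [10, 20], [20, 30]])   # [start, stop) pairs
data_start, data_stop = 0, 20

closed = (windows[:, 0] >= data_start) & (windows[:, 0] <= data_stop)
half_open = (windows[:, 0] >= data_start) & (windows[:, 0] < data_stop)
assert closed.sum() == 3      # window starting at 20 double-counted
assert half_open.sum() == 2   # fixed: it is left for the next chunk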
189
https://:@github.com/mozman/ezdxf.git
a4c290a333b51772ecb0b9184506596d49183690
@@ -22,7 +22,7 @@ class Drawing: self.encoding = 'cp1252' # read/write self.filename = None # read/write self.entitydb = EntityDB() - self.sections = Sections(self, tagreader) + self.sections = Sections(tagreader, self) self._dxfversion = self.header['$ACADVER'] self.encoding = self._get_encoding() nexthandle = int(self.header.get('$HANDSEED', '500'), 16)
ezdxf/drawing.py
ArgSwap(idxs=0<->1 @(25,24)->(25,32))
class Drawing: self.encoding = 'cp1252' # read/write self.filename = None # read/write self.entitydb = EntityDB() self.sections = Sections(self, tagreader) self._dxfversion = self.header['$ACADVER'] self.encoding = self._get_encoding() nexthandle = int(self.header.get('$HANDSEED', '500'), 16)
class Drawing: self.encoding = 'cp1252' # read/write self.filename = None # read/write self.entitydb = EntityDB() self.sections = Sections(tagreader, self) self._dxfversion = self.header['$ACADVER'] self.encoding = self._get_encoding() nexthandle = int(self.header.get('$HANDSEED', '500'), 16)
190
https://:@github.com/mozman/ezdxf.git
d2bcbe493b5300e97913a1a13772a19547436239
@@ -212,7 +212,7 @@ class GroupCollection(ObjectCollection): raise DXFTypeError(group.dxftype()) if name in self: - super().delete(group) + super().delete(name) else: raise DXFValueError("GROUP not in group table registered.")
ezdxf/entities/dxfgroups.py
ReplaceText(target='name' @(215,27)->(215,32))
class GroupCollection(ObjectCollection): raise DXFTypeError(group.dxftype()) if name in self: super().delete(group) else: raise DXFValueError("GROUP not in group table registered.")
class GroupCollection(ObjectCollection): raise DXFTypeError(group.dxftype()) if name in self: super().delete(name) else: raise DXFValueError("GROUP not in group table registered.")
191
https://:@github.com/mozman/ezdxf.git
f6517755bbaeb24096de1f6fb8294b36c21cb769
@@ -116,7 +116,7 @@ class Face3d(_Base): def set_edge_visibilty(self, num, status=False): """ Set visibility of edge `num`, status `True` for visible, status `False` for invisible. """ - if status: + if not status: self.dxf.invisible = self.dxf.invisible | (1 << num) else: self.dxf.invisible = self.dxf.invisible & ~(1 << num)
src/ezdxf/entities/solid.py
ReplaceText(target='not ' @(119,11)->(119,11))
class Face3d(_Base): def set_edge_visibilty(self, num, status=False): """ Set visibility of edge `num`, status `True` for visible, status `False` for invisible. """ if status: self.dxf.invisible = self.dxf.invisible | (1 << num) else: self.dxf.invisible = self.dxf.invisible & ~(1 << num)
class Face3d(_Base): def set_edge_visibilty(self, num, status=False): """ Set visibility of edge `num`, status `True` for visible, status `False` for invisible. """ if not status: self.dxf.invisible = self.dxf.invisible | (1 << num) else: self.dxf.invisible = self.dxf.invisible & ~(1 << num)
192
https://:@github.com/mozman/ezdxf.git
516fbb01d55b9dc86b20b3d2263496560ad74415
@@ -195,7 +195,7 @@ def virtual_block_reference_entities(block_ref: 'Insert', if block_ref.has_uniform_scaling and xscale < 0: # handle reflection about all three axis -x, -y, -z explicit as non uniform scaling - has_non_uniform_scaling = True + has_non_uniform_scaling = False if uniform_scaling_factor is not None: uniform_scaling_factor = float(uniform_scaling_factor)
src/ezdxf/explode.py
ReplaceText(target='False' @(198,38)->(198,42))
def virtual_block_reference_entities(block_ref: 'Insert', if block_ref.has_uniform_scaling and xscale < 0: # handle reflection about all three axis -x, -y, -z explicit as non uniform scaling has_non_uniform_scaling = True if uniform_scaling_factor is not None: uniform_scaling_factor = float(uniform_scaling_factor)
def virtual_block_reference_entities(block_ref: 'Insert', if block_ref.has_uniform_scaling and xscale < 0: # handle reflection about all three axis -x, -y, -z explicit as non uniform scaling has_non_uniform_scaling = False if uniform_scaling_factor is not None: uniform_scaling_factor = float(uniform_scaling_factor)
193
https://:@github.com/mozman/ezdxf.git
ba0c909bdfc0c64d4909b15c9e12a54b9a36d7a4
@@ -264,7 +264,7 @@ class Frontend: last_vertex = end if vertices: - if last_vertex.isclose(vertices[0]): + if not last_vertex.isclose(vertices[0]): vertices.append(last_vertex) self.out.draw_filled_polygon(vertices, properties)
src/ezdxf/addons/drawing/frontend.py
ReplaceText(target='not ' @(267,19)->(267,19))
class Frontend: last_vertex = end if vertices: if last_vertex.isclose(vertices[0]): vertices.append(last_vertex) self.out.draw_filled_polygon(vertices, properties)
class Frontend: last_vertex = end if vertices: if not last_vertex.isclose(vertices[0]): vertices.append(last_vertex) self.out.draw_filled_polygon(vertices, properties)
194
https://:@github.com/mozman/ezdxf.git
637cf54b973fb9bda6e4b0612b439ee69f04cf15
@@ -184,7 +184,7 @@ def has_clockwise_orientation(vertices: Iterable['Vertex']) -> bool: return sum( (p2.x - p1.x) * (p2.y + p1.y) for p1, p2 in zip(vertices, vertices[1:]) - ) < 0 + ) > 0 def enclosing_angles(angle, start_angle, end_angle, ccw=True,
src/ezdxf/math/construct2d.py
ReplaceText(target='>' @(187,6)->(187,7))
def has_clockwise_orientation(vertices: Iterable['Vertex']) -> bool: return sum( (p2.x - p1.x) * (p2.y + p1.y) for p1, p2 in zip(vertices, vertices[1:]) ) < 0 def enclosing_angles(angle, start_angle, end_angle, ccw=True,
def has_clockwise_orientation(vertices: Iterable['Vertex']) -> bool: return sum( (p2.x - p1.x) * (p2.y + p1.y) for p1, p2 in zip(vertices, vertices[1:]) ) > 0 def enclosing_angles(angle, start_angle, end_angle, ccw=True,
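Record 194 corrects the sign test in the shoelace formula: summing (x2 - x1) * (y2 + y1) over a closed ring is positive for clockwise polygons in a y-up plane. The sketch below closes the ring explicitly, which the surrounding ezdxf code is assumed to handle elsewhere:

def has_clockwise_orientation(vertices):
    ring = list(vertices) + [vertices[0]]   # close the ring
    return sum((p2[0] - p1[0]) * (p2[1] + p1[1])
               for p1, p2 in zip(ring, ring[1:])) > 0

ccw_square = [(0, 0), (1, 0), (1, 1), (0, 1)]
assert not has_clockwise_orientation(ccw_square)
assert has_clockwise_orientation(list(reversed(ccw_square)))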
195
https://:@github.com/Parsely/pykafka.git
676b3119ff9f4cd2a5bebf1ee0e3e52071cd65af
@@ -198,7 +198,7 @@ class Producer(): else: key, value = message value = str(value) - yield (key, value), self._partitioner(partitions, message).id + yield (key, value), self._partitioner(partitions, key).id def _produce(self, message_partition_tups, attempt): """Publish a set of messages to relevant brokers.
pykafka/producer.py
ReplaceText(target='key' @(201,62)->(201,69))
class Producer(): else: key, value = message value = str(value) yield (key, value), self._partitioner(partitions, message).id def _produce(self, message_partition_tups, attempt): """Publish a set of messages to relevant brokers.
class Producer(): else: key, value = message value = str(value) yield (key, value), self._partitioner(partitions, key).id def _produce(self, message_partition_tups, attempt): """Publish a set of messages to relevant brokers.
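Record 195 partitions on the whole (key, value) message instead of the key alone, which breaks the contract that equal keys always map to the same partition. A hypothetical hash partitioner makes the point:

def hash_partitioner(partitions, key):
    return partitions[hash(key) % len(partitions)]

partitions = [0, 1, 2]
# Two messages sharing a key but not a value must co-locate; hashing the
# (key, value) tuple would scatter them across partitions.
assert (hash_partitioner(partitions, b"user-42")
        == hash_partitioner(partitions, b"user-42"))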
196
https://:@github.com/Parsely/pykafka.git
559679443462fa62b4378453c5dfff14df85654f
@@ -816,7 +816,7 @@ class OwnedPartition(object): :type messages: Iterable of :class:`pykafka.common.Message` """ for message in messages: - if message.offset < self.last_offset_consumed: + if message.offset <= self.last_offset_consumed: log.debug("Skipping enqueue for offset (%s) " "less than last_offset_consumed (%s)", message.offset, self.last_offset_consumed)
pykafka/simpleconsumer.py
ReplaceText(target='<=' @(819,30)->(819,31))
class OwnedPartition(object): :type messages: Iterable of :class:`pykafka.common.Message` """ for message in messages: if message.offset < self.last_offset_consumed: log.debug("Skipping enqueue for offset (%s) " "less than last_offset_consumed (%s)", message.offset, self.last_offset_consumed)
class OwnedPartition(object): :type messages: Iterable of :class:`pykafka.common.Message` """ for message in messages: if message.offset <= self.last_offset_consumed: log.debug("Skipping enqueue for offset (%s) " "less than last_offset_consumed (%s)", message.offset, self.last_offset_consumed)
197
https://:@github.com/Parsely/pykafka.git
e515296f6b130acb930ddc9f97e84a7997aedb2f
@@ -137,7 +137,7 @@ class ProducerIntegrationTests(unittest2.TestCase): start = time.time() producer.produce(uuid4().bytes) producer.produce(uuid4().bytes) - self.assertTrue(int(time.time() - start) > int(linger)) + self.assertTrue(int(time.time() - start) >= int(linger)) self.consumer.consume() self.consumer.consume()
tests/pykafka/test_producer.py
ReplaceText(target='>=' @(140,49)->(140,50))
class ProducerIntegrationTests(unittest2.TestCase): start = time.time() producer.produce(uuid4().bytes) producer.produce(uuid4().bytes) self.assertTrue(int(time.time() - start) > int(linger)) self.consumer.consume() self.consumer.consume()
class ProducerIntegrationTests(unittest2.TestCase): start = time.time() producer.produce(uuid4().bytes) producer.produce(uuid4().bytes) self.assertTrue(int(time.time() - start) >= int(linger)) self.consumer.consume() self.consumer.consume()
198
https://:@github.com/Parsely/pykafka.git
ad8f2d457b8ca3b7dc7a75360f00becd7f0484a4
@@ -595,7 +595,7 @@ class OwnedBroker(object): # bind the MessageSizeTooLarge error the delivery # report and remove it from the producer queue message = self.queue.pop() - self._delivery_reports.put(peeked_message, exc=exc) + self._delivery_reports.put(message, exc=exc) # remove from pending message count self.increment_messages_pending(-1) continue
pykafka/producer.py
ReplaceText(target='message' @(598,51)->(598,65))
class OwnedBroker(object): # bind the MessageSizeTooLarge error the delivery # report and remove it from the producer queue message = self.queue.pop() self._delivery_reports.put(peeked_message, exc=exc) # remove from pending message count self.increment_messages_pending(-1) continue
class OwnedBroker(object): # bind the MessageSizeTooLarge error the delivery # report and remove it from the producer queue message = self.queue.pop() self._delivery_reports.put(message, exc=exc) # remove from pending message count self.increment_messages_pending(-1) continue
199
https://:@github.com/Parsely/pykafka.git
217865b12c58addc95c419f159b477bc5636c6a9
@@ -594,7 +594,7 @@ class SimpleConsumer(object): to_retry = [pair for err in itervalues(parts_by_error) for pair in err] reqs = [p.build_offset_fetch_request() for p, _ in to_retry] - if len(parts_by_error) > 1: + if len(parts_by_error) > 0: raise KafkaException(parts_by_error) def reset_offsets(self, partition_offsets=None):
pykafka/simpleconsumer.py
ReplaceText(target='0' @(597,33)->(597,34))
class SimpleConsumer(object): to_retry = [pair for err in itervalues(parts_by_error) for pair in err] reqs = [p.build_offset_fetch_request() for p, _ in to_retry] if len(parts_by_error) > 1: raise KafkaException(parts_by_error) def reset_offsets(self, partition_offsets=None):
class SimpleConsumer(object): to_retry = [pair for err in itervalues(parts_by_error) for pair in err] reqs = [p.build_offset_fetch_request() for p, _ in to_retry] if len(parts_by_error) > 0: raise KafkaException(parts_by_error) def reset_offsets(self, partition_offsets=None):
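Record 199 lowers the raise threshold from > 1 to > 0: any non-empty error map should surface, while the old guard silently swallowed a single failing error class. Sketch with a made-up payload:

parts_by_error = {"OffsetOutOfRange": ["partition-0"]}
assert not (len(parts_by_error) > 1)   # buggy guard: the error slips through
assert len(parts_by_error) > 0         # fixed guard: it is raised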