column         type              min    max
Unnamed: 0     int64 (values)    0      2.44k
repo           string (lengths)  32     81
hash           string (lengths)  40     40
diff           string (lengths)  113    1.17k
old_path       string (lengths)  5      84
rewrite        string (lengths)  34     79
initial_state  string (lengths)  75     980
final_state    string (lengths)  76     980
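The column summary above can be read back programmatically. Below is a minimal loading sketch; the file name bug_fixes.csv and the CSV storage format are assumptions made for illustration, since this excerpt does not say how the records are actually stored.

import pandas as pd

# Load the records (file name and format are assumed, not given in this excerpt).
df = pd.read_csv("bug_fixes.csv")

# One row per single-statement bug fix; the columns match the summary above.
print(df.dtypes)

row = df.iloc[0]
print(row["repo"])           # e.g. https://:@github.com/PmagPy/PmagPy.git
print(row["hash"])           # 40-character commit hash
print(row["old_path"])       # path of the patched file inside the repository
print(row["rewrite"])        # e.g. ReplaceText(target='con' @(3366,15)->(3366,27))
print(row["diff"])           # unified diff of the one-line fix
print(row["initial_state"])  # buggy code snippet
print(row["final_state"])    # fixed code snippet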
500
https://:@github.com/PmagPy/PmagPy.git
1c9dfb436532fcefcdf85913462eca89c4a05886
@@ -3363,7 +3363,7 @@ def upload_magic3(concat=0, dir_path='.', dmodel=None, vocab="", contribution=No # otherwise create a new Contribution in dir_path con = Contribution(dir_path, vocabulary=vocab) - dir_path = contribution.directory + dir_path = con.directory # take out any extra added columns con.remove_non_magic_cols() # begin the upload process
pmagpy/ipmag.py
ReplaceText(target='con' @(3366,15)->(3366,27))
def upload_magic3(concat=0, dir_path='.', dmodel=None, vocab="", contribution=No # otherwise create a new Contribution in dir_path con = Contribution(dir_path, vocabulary=vocab) dir_path = contribution.directory # take out any extra added columns con.remove_non_magic_cols() # begin the upload process
def upload_magic3(concat=0, dir_path='.', dmodel=None, vocab="", contribution=No # otherwise create a new Contribution in dir_path con = Contribution(dir_path, vocabulary=vocab) dir_path = con.directory # take out any extra added columns con.remove_non_magic_cols() # begin the upload process
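The rewrite field encodes the one-token edit that turns initial_state into final_state. Its coordinate semantics are not documented in this excerpt; the sketch below assumes @(line,start)->(line,end) means a 1-based line number with 0-based column offsets into that line, and applies it to a toy line indented to match record 500's recorded coordinates.

def apply_replace_text(lines, line_no, start_col, end_col, target):
    """Replace a column span on one line (1-based line, 0-based columns) -- assumed semantics."""
    line = lines[line_no - 1]
    lines[line_no - 1] = line[:start_col] + target + line[end_col:]
    return lines

# Toy reproduction of record 500: 'contribution.directory' -> 'con.directory'
source = ["    dir_path = contribution.directory"]
fixed = apply_replace_text(source, 1, 15, 27, "con")
print(fixed[0])  # "    dir_path = con.directory"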
501
https://:@github.com/PmagPy/PmagPy.git
8fd06aa1272e4a82082fc0a0b5641242e2f18154
@@ -72,7 +72,7 @@ def main(): pastTime=startTime if d < commandLength: pastTime=startTime-commandLength - printout="Due to long processing time the look-back time has been extended to " +str(pastTime.total_seconds()) + " seconds" + "\n" + printout="Due to long processing time the look-back time has been extended to " +str(commandLength.total_seconds()) + " seconds" + "\n" f.write(printout) else: pastTime=startTime-d
programs/createNewPlots.py
ReplaceText(target='commandLength' @(75,97)->(75,105))
def main(): pastTime=startTime if d < commandLength: pastTime=startTime-commandLength printout="Due to long processing time the look-back time has been extended to " +str(pastTime.total_seconds()) + " seconds" + "\n" f.write(printout) else: pastTime=startTime-d
def main(): pastTime=startTime if d < commandLength: pastTime=startTime-commandLength printout="Due to long processing time the look-back time has been extended to " +str(commandLength.total_seconds()) + " seconds" + "\n" f.write(printout) else: pastTime=startTime-d
502
https://:@github.com/PmagPy/PmagPy.git
de14e55e58558ac9d6980bdaf309fe17496a8f44
@@ -3524,7 +3524,7 @@ def iodp_kly4s_lore(kly4s_file, meas_out='measurements.txt', tau3=in_df['Kmin susceptibility (SI)']/3 v3_dec=in_df['Kmin dec (deg)'] v3_inc=in_df['Kmin inc (deg)'] - specimens_df['aniso_v3']=tau1.astype('str')+":"+v3_dec.astype('str')+":"+v3_inc.astype('str') + specimens_df['aniso_v3']=tau3.astype('str')+":"+v3_dec.astype('str')+":"+v3_inc.astype('str') # output data files
pmagpy/convert_2_magic.py
ReplaceText(target='tau3' @(3527,29)->(3527,33))
def iodp_kly4s_lore(kly4s_file, meas_out='measurements.txt', tau3=in_df['Kmin susceptibility (SI)']/3 v3_dec=in_df['Kmin dec (deg)'] v3_inc=in_df['Kmin inc (deg)'] specimens_df['aniso_v3']=tau1.astype('str')+":"+v3_dec.astype('str')+":"+v3_inc.astype('str') # output data files
def iodp_kly4s_lore(kly4s_file, meas_out='measurements.txt', tau3=in_df['Kmin susceptibility (SI)']/3 v3_dec=in_df['Kmin dec (deg)'] v3_inc=in_df['Kmin inc (deg)'] specimens_df['aniso_v3']=tau3.astype('str')+":"+v3_dec.astype('str')+":"+v3_inc.astype('str') # output data files
503
https://:@github.com/efficios/pytsdl.git
1b6d994aa14340af28ed903692ff0fde598ebaeb
@@ -1671,7 +1671,7 @@ class _DocCreatorVisitor: # assign tag to copy now variant_copy.tag = self._decode_unary(t.tag.value) - return variant + return variant_copy def _type_to_obj(self, t): return self._type_to_obj_map[type(t)](t)
pytsdl/parser.py
ReplaceText(target='variant_copy' @(1674,15)->(1674,22))
class _DocCreatorVisitor: # assign tag to copy now variant_copy.tag = self._decode_unary(t.tag.value) return variant def _type_to_obj(self, t): return self._type_to_obj_map[type(t)](t)
class _DocCreatorVisitor: # assign tag to copy now variant_copy.tag = self._decode_unary(t.tag.value) return variant_copy def _type_to_obj(self, t): return self._type_to_obj_map[type(t)](t)
504
https://:@github.com/mabrownnyu/youtube-data-api.git
07d71d5668d1dbeec28d84ce1cf8982fbd71be36
@@ -585,7 +585,7 @@ class YoutubeDataApi: else: captions = [] for v_id in video_id: - captions.append(_get_captions(video_id, lang_code=lang_code, parser=parser, **kwargs)) + captions.append(_get_captions(v_id, lang_code=lang_code, parser=parser, **kwargs)) return captions
youtube_api/youtube_api.py
ReplaceText(target='v_id' @(588,46)->(588,54))
class YoutubeDataApi: else: captions = [] for v_id in video_id: captions.append(_get_captions(video_id, lang_code=lang_code, parser=parser, **kwargs)) return captions
class YoutubeDataApi: else: captions = [] for v_id in video_id: captions.append(_get_captions(v_id, lang_code=lang_code, parser=parser, **kwargs)) return captions
505
https://:@github.com/xhochy/conda-mirror-ng.git
e0b34555d779f9b440512f850da39aa5a0e29ece
@@ -85,5 +85,5 @@ def test_handling_bad_package(tmpdir, repodata): with bz2.BZ2File(bad_pkg_path, 'wb') as f: f.write("This is a fake package".encode()) assert bad_pkg_name in os.listdir(bad_pkg_root) - conda_mirror._validate_packages(repodata, local_repo_root) + conda_mirror._validate_packages(repodata, bad_pkg_root) assert bad_pkg_name not in os.listdir(bad_pkg_root) \ No newline at end of file
test/test_conda_mirror.py
ReplaceText(target='bad_pkg_root' @(88,46)->(88,61))
def test_handling_bad_package(tmpdir, repodata): with bz2.BZ2File(bad_pkg_path, 'wb') as f: f.write("This is a fake package".encode()) assert bad_pkg_name in os.listdir(bad_pkg_root) conda_mirror._validate_packages(repodata, local_repo_root) assert bad_pkg_name not in os.listdir(bad_pkg_root) \ No newline at end of file
def test_handling_bad_package(tmpdir, repodata): with bz2.BZ2File(bad_pkg_path, 'wb') as f: f.write("This is a fake package".encode()) assert bad_pkg_name in os.listdir(bad_pkg_root) conda_mirror._validate_packages(repodata, bad_pkg_root) assert bad_pkg_name not in os.listdir(bad_pkg_root) \ No newline at end of file
506
https://:@github.com/kelsoncm/sc4.git
8c4e8ac8358668aaa6c32b73af44178e6e9db3ab
@@ -45,6 +45,6 @@ def others_months(): def daterange(start, end, step=datetime.timedelta(1)): curr = start - while curr < end: + while curr <= end: yield curr curr += step
sc4py/sc4py/datetime.py
ReplaceText(target='<=' @(48,15)->(48,16))
def others_months(): def daterange(start, end, step=datetime.timedelta(1)): curr = start while curr < end: yield curr curr += step
def others_months(): def daterange(start, end, step=datetime.timedelta(1)): curr = start while curr <= end: yield curr curr += step
507
https://:@github.com/makingspace/pubsubpy.git
bd6903e8ea6e7069712d7cf17cb725aa7f821d1a
@@ -5,7 +5,7 @@ MODEL_EXCHANGE = 'model_exchange' __REQUIRED_INIT_KWARGS = {AMQP_URL, MODEL_EXCHANGE} __OPTIONAL_INIT_KWARGS = set() -__ALLOWED_INIT_KWARGS = __REQUIRED_INIT_KWARGS & __OPTIONAL_INIT_KWARGS +__ALLOWED_INIT_KWARGS = __REQUIRED_INIT_KWARGS | __OPTIONAL_INIT_KWARGS def init(**kwargs):
pubsub/__init__.py
ReplaceText(target='|' @(8,47)->(8,48))
MODEL_EXCHANGE = 'model_exchange' __REQUIRED_INIT_KWARGS = {AMQP_URL, MODEL_EXCHANGE} __OPTIONAL_INIT_KWARGS = set() __ALLOWED_INIT_KWARGS = __REQUIRED_INIT_KWARGS & __OPTIONAL_INIT_KWARGS def init(**kwargs):
MODEL_EXCHANGE = 'model_exchange' __REQUIRED_INIT_KWARGS = {AMQP_URL, MODEL_EXCHANGE} __OPTIONAL_INIT_KWARGS = set() __ALLOWED_INIT_KWARGS = __REQUIRED_INIT_KWARGS | __OPTIONAL_INIT_KWARGS def init(**kwargs):
508
https://:@github.com/moff4/kframe.git
01c644e825136b2b86d9b6f7dd6a34e4997c128c
@@ -310,7 +310,7 @@ class Parent: kwargs (dict) - dict of arg that will be passed to init() as **kwargs (plugins only) """ from kframe.base.plugin import Plugin - if issubclass(target, Plugin): + if not issubclass(target, Plugin): raise ValueError('target ({}) bust be isinstance of kframe.Plugin'.format(str(target))) self.plugin_t[kw.get('key', target.name)] = { 'target': target,
kframe/base/parent.py
ReplaceText(target='not ' @(313,11)->(313,11))
class Parent: kwargs (dict) - dict of arg that will be passed to init() as **kwargs (plugins only) """ from kframe.base.plugin import Plugin if issubclass(target, Plugin): raise ValueError('target ({}) bust be isinstance of kframe.Plugin'.format(str(target))) self.plugin_t[kw.get('key', target.name)] = { 'target': target,
class Parent: kwargs (dict) - dict of arg that will be passed to init() as **kwargs (plugins only) """ from kframe.base.plugin import Plugin if not issubclass(target, Plugin): raise ValueError('target ({}) bust be isinstance of kframe.Plugin'.format(str(target))) self.plugin_t[kw.get('key', target.name)] = { 'target': target,
509
https://:@github.com/moff4/kframe.git
85c41d1f13cb2c7d79787bbe35de661aa6c6b824
@@ -329,7 +329,7 @@ class Task: cfg.update(kwargs) if 'shedule' in cfg: cfg['shedule'] = self._convert_shedule(cfg['shedule']) - self.cfg.update(kwargs) + self.cfg.update(cfg) def ready_for_run(self, t, tm): """
kframe/plugins/planner/task.py
ReplaceText(target='cfg' @(332,24)->(332,30))
class Task: cfg.update(kwargs) if 'shedule' in cfg: cfg['shedule'] = self._convert_shedule(cfg['shedule']) self.cfg.update(kwargs) def ready_for_run(self, t, tm): """
class Task: cfg.update(kwargs) if 'shedule' in cfg: cfg['shedule'] = self._convert_shedule(cfg['shedule']) self.cfg.update(cfg) def ready_for_run(self, t, tm): """
510
https://:@github.com/qaviton/qaviton_package_manager.git
0498c2b71479109db949e369a0b7189d75dded65
@@ -63,7 +63,7 @@ class Build(Prep): def update_version(self, version): version = self.versioning(version) content = self.get_pkg_init() - if b'\n__version__' not in content or not content.startswith(b'__version__'): + if b'\n__version__' not in content and not content.startswith(b'__version__'): raise IOError("missing __version__ in the package __init__.py file") lines = content.splitlines() for i, line in enumerate(lines):
qaviton_package_manager/manager_methods/distribute_to_git.py
ReplaceText(target='and' @(66,43)->(66,45))
class Build(Prep): def update_version(self, version): version = self.versioning(version) content = self.get_pkg_init() if b'\n__version__' not in content or not content.startswith(b'__version__'): raise IOError("missing __version__ in the package __init__.py file") lines = content.splitlines() for i, line in enumerate(lines):
class Build(Prep): def update_version(self, version): version = self.versioning(version) content = self.get_pkg_init() if b'\n__version__' not in content and not content.startswith(b'__version__'): raise IOError("missing __version__ in the package __init__.py file") lines = content.splitlines() for i, line in enumerate(lines):
511
https://:@github.com/morganthrapp/pyACH.git
516f347fa832237e2868544c078f86f80c7c2a1c
@@ -256,7 +256,7 @@ class BatchHeader: def _get_effective_entry_date(effective_entry_date): _date = datetime.datetime.today() _date += datetime.timedelta(days=effective_entry_date) - while _date.isoweekday() not in WEEKEND: + while _date.isoweekday() in WEEKEND: _date += datetime.timedelta(days=1) return _date.strftime(day_format_string)
pyach/ACHRecordTypes.py
ReplaceText(target=' in ' @(259,32)->(259,40))
class BatchHeader: def _get_effective_entry_date(effective_entry_date): _date = datetime.datetime.today() _date += datetime.timedelta(days=effective_entry_date) while _date.isoweekday() not in WEEKEND: _date += datetime.timedelta(days=1) return _date.strftime(day_format_string)
class BatchHeader: def _get_effective_entry_date(effective_entry_date): _date = datetime.datetime.today() _date += datetime.timedelta(days=effective_entry_date) while _date.isoweekday() in WEEKEND: _date += datetime.timedelta(days=1) return _date.strftime(day_format_string)
512
https://:@github.com/eldarion/agon-ratings.git
99a317f1ead850e948cf08123fef8ea60b30060e
@@ -72,7 +72,7 @@ class Rating(models.Model): rating_obj = None if rating_obj and rating == 0: - return rating.clear() + return rating_obj.clear() if rating_obj is None: rating_obj = cls.objects.create(
pinax/ratings/models.py
ReplaceText(target='rating_obj' @(75,19)->(75,25))
class Rating(models.Model): rating_obj = None if rating_obj and rating == 0: return rating.clear() if rating_obj is None: rating_obj = cls.objects.create(
class Rating(models.Model): rating_obj = None if rating_obj and rating == 0: return rating_obj.clear() if rating_obj is None: rating_obj = cls.objects.create(
513
https://:@github.com/sraashis/ature.git
caee2ab19dc6b73ab1916156ad80c5732957803f
@@ -94,7 +94,7 @@ class PatchesGenerator(Dataset): img_tensor = self.transform(img_tensor) if self.segment_mode: - return self.data[ID], img_tensor, self.labels[index] + return self.data[index], img_tensor, self.labels[index] return img_tensor, self.labels[index]
neuralnet/utils/datasets.py
ReplaceText(target='index' @(97,29)->(97,31))
class PatchesGenerator(Dataset): img_tensor = self.transform(img_tensor) if self.segment_mode: return self.data[ID], img_tensor, self.labels[index] return img_tensor, self.labels[index]
class PatchesGenerator(Dataset): img_tensor = self.transform(img_tensor) if self.segment_mode: return self.data[index], img_tensor, self.labels[index] return img_tensor, self.labels[index]
514
https://:@github.com/sraashis/ature.git
e192de63324d8472c331bd06ab36ea710ee93d2a
@@ -96,7 +96,7 @@ class ThrnetTrainer(NNTrainer): img_loss += current_loss thr = thr_map[..., None][..., None] - segmented = (prob_map >= thr.byte()) + segmented = (prob_map > thr.byte()) # batch_score = ScoreAccumulator().add_tensor(segmented, truth) print('Loss: ', current_loss, end='\r')
neuralnet/thrnet/thrnet_trainer.py
ReplaceText(target='>' @(99,42)->(99,44))
class ThrnetTrainer(NNTrainer): img_loss += current_loss thr = thr_map[..., None][..., None] segmented = (prob_map >= thr.byte()) # batch_score = ScoreAccumulator().add_tensor(segmented, truth) print('Loss: ', current_loss, end='\r')
class ThrnetTrainer(NNTrainer): img_loss += current_loss thr = thr_map[..., None][..., None] segmented = (prob_map > thr.byte()) # batch_score = ScoreAccumulator().add_tensor(segmented, truth) print('Loss: ', current_loss, end='\r')
515
https://:@github.com/sraashis/ature.git
934f9ff3b34b487614e4f2653c153cd78d716b57
@@ -50,7 +50,7 @@ class ThrnetTrainer(NNTrainer): if gen_images: img = segmented_img.clone().cpu().numpy() - img_score.add_array(img_obj.ground_truth, img) + img_score.add_array(img, img_obj.ground_truth) # img = iu.remove_connected_comp(np.array(segmented_img, dtype=np.uint8), # connected_comp_diam_limit=10) IMG.fromarray(np.array(img, dtype=np.uint8)).save(
neuralnet/mapnet/mapnet_trainer.py
ArgSwap(idxs=0<->1 @(53,20)->(53,39))
class ThrnetTrainer(NNTrainer): if gen_images: img = segmented_img.clone().cpu().numpy() img_score.add_array(img_obj.ground_truth, img) # img = iu.remove_connected_comp(np.array(segmented_img, dtype=np.uint8), # connected_comp_diam_limit=10) IMG.fromarray(np.array(img, dtype=np.uint8)).save(
class ThrnetTrainer(NNTrainer): if gen_images: img = segmented_img.clone().cpu().numpy() img_score.add_array(img, img_obj.ground_truth) # img = iu.remove_connected_comp(np.array(segmented_img, dtype=np.uint8), # connected_comp_diam_limit=10) IMG.fromarray(np.array(img, dtype=np.uint8)).save(
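Record 515 above is the first to use an ArgSwap rewrite, which exchanges two positional arguments of the flagged call. How such an edit would be applied is not spelled out in the data; the AST-based sketch below is one assumed way to realise ArgSwap(idxs=0<->1) on record 515's call (ast.unparse requires Python 3.9+).

import ast

# Assumed realisation of ArgSwap(idxs=0<->1): swap the first two positional
# arguments of the call named in the rewrite (here, record 515's call).
src = "img_score.add_array(img_obj.ground_truth, img)"
call = ast.parse(src, mode="eval").body   # the ast.Call node
call.args[0], call.args[1] = call.args[1], call.args[0]
print(ast.unparse(call))                  # img_score.add_array(img, img_obj.ground_truth)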
516
https://:@github.com/sraashis/ature.git
934f9ff3b34b487614e4f2653c153cd78d716b57
@@ -59,7 +59,7 @@ class UNetNNTrainer(NNTrainer): if gen_images: map_img = map_img.cpu().numpy() predicted_img = predicted_img.cpu().numpy() - img_score.add_array(img_obj.ground_truth, predicted_img) + img_score.add_array(predicted_img, img_obj.ground_truth) IMG.fromarray(np.array(predicted_img, dtype=np.uint8)).save( os.path.join(self.log_dir, 'pred_' + img_obj.file_name.split('.')[0] + '.png')) IMG.fromarray(np.array(map_img, dtype=np.uint8)).save(
neuralnet/unet/unet_trainer.py
ArgSwap(idxs=0<->1 @(62,20)->(62,39))
class UNetNNTrainer(NNTrainer): if gen_images: map_img = map_img.cpu().numpy() predicted_img = predicted_img.cpu().numpy() img_score.add_array(img_obj.ground_truth, predicted_img) IMG.fromarray(np.array(predicted_img, dtype=np.uint8)).save( os.path.join(self.log_dir, 'pred_' + img_obj.file_name.split('.')[0] + '.png')) IMG.fromarray(np.array(map_img, dtype=np.uint8)).save(
class UNetNNTrainer(NNTrainer): if gen_images: map_img = map_img.cpu().numpy() predicted_img = predicted_img.cpu().numpy() img_score.add_array(predicted_img, img_obj.ground_truth) IMG.fromarray(np.array(predicted_img, dtype=np.uint8)).save( os.path.join(self.log_dir, 'pred_' + img_obj.file_name.split('.')[0] + '.png')) IMG.fromarray(np.array(map_img, dtype=np.uint8)).save(
517
https://:@github.com/tessgi/ticgen.git
9350670fed545226d33c16432802990b6ddd7274
@@ -144,7 +144,7 @@ class Star(object): E * self.Tmag**4 + F * self.Tmag**5) def get_oneSigmaNoise(self): - return (np.exp(self.get_oneHourNoiseLnsigma()) * + return (np.exp(self.get_oneHourNoiseLnsigma()) / np.sqrt(self.integration / 60.)) def TESS_Mag_VJKs(self):
ticgen/ticgen.py
ReplaceText(target='/' @(147,55)->(147,56))
class Star(object): E * self.Tmag**4 + F * self.Tmag**5) def get_oneSigmaNoise(self): return (np.exp(self.get_oneHourNoiseLnsigma()) * np.sqrt(self.integration / 60.)) def TESS_Mag_VJKs(self):
class Star(object): E * self.Tmag**4 + F * self.Tmag**5) def get_oneSigmaNoise(self): return (np.exp(self.get_oneHourNoiseLnsigma()) / np.sqrt(self.integration / 60.)) def TESS_Mag_VJKs(self):
518
https://:@github.com/aki-nishimura/bayes-bridge.git
728164889880a793dd156bafeeaaec2da93c4706
@@ -94,7 +94,7 @@ class SparseRegressionCoefficientSampler(): )) precond_hessian_matvec = lambda beta: \ precond_prior_prec * beta \ - + precond_scale * loglik_hessian_matvec(precond_scale * beta) + - precond_scale * loglik_hessian_matvec(precond_scale * beta) precond_hessian_op = sp.sparse.linalg.LinearOperator( (X.shape[1], X.shape[1]), precond_hessian_matvec )
bayesbridge/reg_coef_sampler/reg_coef_sampler.py
ReplaceText(target='-' @(97,12)->(97,13))
class SparseRegressionCoefficientSampler(): )) precond_hessian_matvec = lambda beta: \ precond_prior_prec * beta \ + precond_scale * loglik_hessian_matvec(precond_scale * beta) precond_hessian_op = sp.sparse.linalg.LinearOperator( (X.shape[1], X.shape[1]), precond_hessian_matvec )
class SparseRegressionCoefficientSampler(): )) precond_hessian_matvec = lambda beta: \ precond_prior_prec * beta \ - precond_scale * loglik_hessian_matvec(precond_scale * beta) precond_hessian_op = sp.sparse.linalg.LinearOperator( (X.shape[1], X.shape[1]), precond_hessian_matvec )
519
https://:@github.com/aki-nishimura/bayes-bridge.git
8558d665b0d07e1565d88b4d477a67c576c7ffbd
@@ -70,7 +70,7 @@ class BayesBridge(): self.model = LinearModel(outcome, X) elif model == 'logit': n_success, n_trial = outcome - self.model = LogisticModel(n_success, X, n_trial) + self.model = LogisticModel(n_success, n_trial, X) elif model == 'cox': self.model = CoxModel(event_time, censoring_time, X) else:
bayesbridge/bayesbridge.py
ArgSwap(idxs=1<->2 @(73,25)->(73,38))
class BayesBridge(): self.model = LinearModel(outcome, X) elif model == 'logit': n_success, n_trial = outcome self.model = LogisticModel(n_success, X, n_trial) elif model == 'cox': self.model = CoxModel(event_time, censoring_time, X) else:
class BayesBridge(): self.model = LinearModel(outcome, X) elif model == 'logit': n_success, n_trial = outcome self.model = LogisticModel(n_success, n_trial, X) elif model == 'cox': self.model = CoxModel(event_time, censoring_time, X) else:
520
https://:@github.com/BTrDB/btrdb4-python.git
68f67274b6c2a6bb4a9206cfb00e7c3747cab1fa
@@ -36,7 +36,7 @@ from btrdb4.endpoint import Endpoint from btrdb4.utils import * MIN_TIME = -(16 << 56) -MAX_TIME = 48 >> 56 +MAX_TIME = 48 << 56 MAX_POINTWIDTH = 63 class Connection(object):
btrdb4/__init__.py
ReplaceText(target='<<' @(39,14)->(39,16))
from btrdb4.endpoint import Endpoint from btrdb4.utils import * MIN_TIME = -(16 << 56) MAX_TIME = 48 >> 56 MAX_POINTWIDTH = 63 class Connection(object):
from btrdb4.endpoint import Endpoint from btrdb4.utils import * MIN_TIME = -(16 << 56) MAX_TIME = 48 << 56 MAX_POINTWIDTH = 63 class Connection(object):
521
https://:@github.com/mahaoyang/newspaper.git
ac1d4c7346ddd0e035f9c7968e7e712340723934
@@ -62,7 +62,7 @@ def prepare_url(url, source_url=None): """operations that purify a url, removes arguments, redirects, and merges relatives with absolutes""" - if source_url is None: + if source_url is not None: source_domain = urlparse(source_url).netloc proper_url = urljoin(source_url, url) proper_url = redirect_back(proper_url, source_domain)
newspaper/urls.py
ReplaceText(target=' is not ' @(65,17)->(65,21))
def prepare_url(url, source_url=None): """operations that purify a url, removes arguments, redirects, and merges relatives with absolutes""" if source_url is None: source_domain = urlparse(source_url).netloc proper_url = urljoin(source_url, url) proper_url = redirect_back(proper_url, source_domain)
def prepare_url(url, source_url=None): """operations that purify a url, removes arguments, redirects, and merges relatives with absolutes""" if source_url is not None: source_domain = urlparse(source_url).netloc proper_url = urljoin(source_url, url) proper_url = redirect_back(proper_url, source_domain)
522
https://:@github.com/tmconsulting/onelya-railway-sdk.git
059265060ae8f9828ae6f5d7a143988cd9c5da4c
@@ -28,7 +28,7 @@ class Session(object): if 'Code' in response: raise OnelyaAPIError(method, response, data) - return data + return response def __send_api_request(self, method, data): url = '{}{}'.format(Session.API_URL, method)
onelya_railway_sdk/session.py
ReplaceText(target='response' @(31,15)->(31,19))
class Session(object): if 'Code' in response: raise OnelyaAPIError(method, response, data) return data def __send_api_request(self, method, data): url = '{}{}'.format(Session.API_URL, method)
class Session(object): if 'Code' in response: raise OnelyaAPIError(method, response, data) return response def __send_api_request(self, method, data): url = '{}{}'.format(Session.API_URL, method)
523
https://:@github.com/rafalp/misago-social-app-django.git
d1d23e7e3cf4364c0d35289290b27787b84f5211
@@ -50,7 +50,7 @@ def sanitize_redirect(host, redirect_to): """ # Quick sanity check. if not redirect_to or \ - not isinstance(redirect_to, six.string_types) and \ + not isinstance(redirect_to, six.string_types) or \ getattr(redirect_to, 'decode', None) and \ not isinstance(redirect_to.decode(), six.string_types): return None
social/utils.py
ReplaceText(target='or' @(53,53)->(53,56))
def sanitize_redirect(host, redirect_to): """ # Quick sanity check. if not redirect_to or \ not isinstance(redirect_to, six.string_types) and \ getattr(redirect_to, 'decode', None) and \ not isinstance(redirect_to.decode(), six.string_types): return None
def sanitize_redirect(host, redirect_to): """ # Quick sanity check. if not redirect_to or \ not isinstance(redirect_to, six.string_types) or \ getattr(redirect_to, 'decode', None) and \ not isinstance(redirect_to.decode(), six.string_types): return None
524
https://:@github.com/rafalp/misago-social-app-django.git
7d0628e7a756526b50449435eb02b2806e815755
@@ -132,7 +132,7 @@ def partial_pipeline_data(strategy, user, *args, **kwargs): kwargs.setdefault('user', user) kwargs.setdefault('request', strategy.request) kwargs.update(xkwargs) - return idx, backend, xargs, xkwargs + return idx, backend, xargs, kwargs def build_absolute_uri(host_url, path=None):
social/utils.py
ReplaceText(target='kwargs' @(135,36)->(135,43))
def partial_pipeline_data(strategy, user, *args, **kwargs): kwargs.setdefault('user', user) kwargs.setdefault('request', strategy.request) kwargs.update(xkwargs) return idx, backend, xargs, xkwargs def build_absolute_uri(host_url, path=None):
def partial_pipeline_data(strategy, user, *args, **kwargs): kwargs.setdefault('user', user) kwargs.setdefault('request', strategy.request) kwargs.update(xkwargs) return idx, backend, xargs, kwargs def build_absolute_uri(host_url, path=None):
525
https://:@github.com/rafalp/misago-social-app-django.git
d53529b57f0a4992889ad490e5314a2244155afa
@@ -27,7 +27,7 @@ class SocialAuthExceptionMiddleware(object): return if isinstance(exception, SocialAuthBaseException): - backend_name = strategy.backend.name + backend_name = request.backend.name message = self.get_message(request, exception) url = self.get_redirect_uri(request, exception) try:
social/apps/django_app/middleware.py
ReplaceText(target='request' @(30,27)->(30,35))
class SocialAuthExceptionMiddleware(object): return if isinstance(exception, SocialAuthBaseException): backend_name = strategy.backend.name message = self.get_message(request, exception) url = self.get_redirect_uri(request, exception) try:
class SocialAuthExceptionMiddleware(object): return if isinstance(exception, SocialAuthBaseException): backend_name = request.backend.name message = self.get_message(request, exception) url = self.get_redirect_uri(request, exception) try:
526
https://:@github.com/c137digital/unv_app.git
d217fa0d780bc6f2acbbf4ea307630c78a695653
@@ -34,7 +34,7 @@ class ComponentSettings: """Create app settings, overrided by env.""" settings = settings or {} if base_settings: - settings = update_dict_recur(settings, base_settings) + settings = update_dict_recur(base_settings, settings) for key, value in os.environ.items(): if 'SETTINGS_' not in key: continue
src/unv/app/settings.py
ArgSwap(idxs=0<->1 @(37,23)->(37,40))
class ComponentSettings: """Create app settings, overrided by env.""" settings = settings or {} if base_settings: settings = update_dict_recur(settings, base_settings) for key, value in os.environ.items(): if 'SETTINGS_' not in key: continue
class ComponentSettings: """Create app settings, overrided by env.""" settings = settings or {} if base_settings: settings = update_dict_recur(base_settings, settings) for key, value in os.environ.items(): if 'SETTINGS_' not in key: continue
527
https://:@github.com/timothycrosley/blox.git
fdc21c5be25dd5452075748e4027e067bfb28e1e
@@ -141,7 +141,7 @@ class TagAttributes(type): full_attribute_map = dict(parents[0].attribute_map) full_attribute_map.update(attribute_map) attribute_map = full_attribute_map - class_dict['attribute_map'] = full_attributes + class_dict['attribute_map'] = attribute_map class_dict['attribute_descriptors'] = attributes attribute_signals = (attribute.signal for attribute in attributes.values() if getattr(attribute, 'signal'))
blox/base.py
ReplaceText(target='attribute_map' @(144,46)->(144,61))
class TagAttributes(type): full_attribute_map = dict(parents[0].attribute_map) full_attribute_map.update(attribute_map) attribute_map = full_attribute_map class_dict['attribute_map'] = full_attributes class_dict['attribute_descriptors'] = attributes attribute_signals = (attribute.signal for attribute in attributes.values() if getattr(attribute, 'signal'))
class TagAttributes(type): full_attribute_map = dict(parents[0].attribute_map) full_attribute_map.update(attribute_map) attribute_map = full_attribute_map class_dict['attribute_map'] = attribute_map class_dict['attribute_descriptors'] = attributes attribute_signals = (attribute.signal for attribute in attributes.values() if getattr(attribute, 'signal'))
528
https://:@github.com/Vizzuality/LMIPy.git
abd7e5a3a0d4e6f5fdaf2798647adb88c490cd6b
@@ -557,5 +557,5 @@ class Layer: # confirm update # update other layer - target_layer.update(update_params=payload, token=token) + target_layer.update(update_params=filtered_payload, token=token)
LMIPy/layer.py
ReplaceText(target='filtered_payload' @(560,42)->(560,49))
class Layer: # confirm update # update other layer target_layer.update(update_params=payload, token=token)
class Layer: # confirm update # update other layer target_layer.update(update_params=filtered_payload, token=token)
529
https://:@github.com/collective/mr.cabot.git
9c955abc239fcdff9bd12db236142387bce61f21
@@ -7,7 +7,7 @@ def html_snippet(obj): else: lat, lon = loc content = IListing(obj).summary - if lat < 0: + if lat > 0: hemi = "west" else: hemi = "east"
src/mr/cabot/html.py
ReplaceText(target='>' @(10,11)->(10,12))
def html_snippet(obj): else: lat, lon = loc content = IListing(obj).summary if lat < 0: hemi = "west" else: hemi = "east"
def html_snippet(obj): else: lat, lon = loc content = IListing(obj).summary if lat > 0: hemi = "west" else: hemi = "east"
530
https://:@github.com/TeamSpen210/srctools.git
079e1c394836a6daecc3e4c996514286937a98dc
@@ -392,7 +392,7 @@ class ModelManager: with open(qc.ref_smd, 'rb') as fb: child_ref = Mesh.parse_smd(fb) - if prop.skin != 0 and prop.skin <= len(mdl.skins): + if prop.skin != 0 and prop.skin < len(mdl.skins): # We need to rename the materials to match the skin. swap_skins = dict(zip( mdl.skins[0],
srctools/compiler/propcombine.py
ReplaceText(target='<' @(395,48)->(395,50))
class ModelManager: with open(qc.ref_smd, 'rb') as fb: child_ref = Mesh.parse_smd(fb) if prop.skin != 0 and prop.skin <= len(mdl.skins): # We need to rename the materials to match the skin. swap_skins = dict(zip( mdl.skins[0],
class ModelManager: with open(qc.ref_smd, 'rb') as fb: child_ref = Mesh.parse_smd(fb) if prop.skin != 0 and prop.skin < len(mdl.skins): # We need to rename the materials to match the skin. swap_skins = dict(zip( mdl.skins[0],
531
https://:@github.com/TeamSpen210/srctools.git
ef4510d1517525251ae2f62f8be714b068e4d909
@@ -1799,7 +1799,7 @@ class Entity: base_name = orig_name.rstrip('0123456789') - if self.map.by_target[orig_name]: + if self.map.by_target[base_name]: # Check every index in order. for i in itertools.count(start=1): name = base_name + str(i)
srctools/vmf.py
ReplaceText(target='base_name' @(1802,30)->(1802,39))
class Entity: base_name = orig_name.rstrip('0123456789') if self.map.by_target[orig_name]: # Check every index in order. for i in itertools.count(start=1): name = base_name + str(i)
class Entity: base_name = orig_name.rstrip('0123456789') if self.map.by_target[base_name]: # Check every index in order. for i in itertools.count(start=1): name = base_name + str(i)
532
https://:@github.com/TeamSpen210/srctools.git
3954c99aa7922609c5c90a40b239ef57d747c5cc
@@ -88,7 +88,7 @@ def main(argv: List[str]) -> None: LOGGER.warning('No studiomdl path provided.') studiomdl_loc = None - run_transformations(vmf, fsys, packlist, path, game_info, studiomdl_loc) + run_transformations(vmf, fsys, packlist, bsp_file, game_info, studiomdl_loc) if studiomdl_loc is not None and args.propcombine: LOGGER.info('Combining props...')
srctools/scripts/postcompiler.py
ReplaceText(target='bsp_file' @(91,45)->(91,49))
def main(argv: List[str]) -> None: LOGGER.warning('No studiomdl path provided.') studiomdl_loc = None run_transformations(vmf, fsys, packlist, path, game_info, studiomdl_loc) if studiomdl_loc is not None and args.propcombine: LOGGER.info('Combining props...')
def main(argv: List[str]) -> None: LOGGER.warning('No studiomdl path provided.') studiomdl_loc = None run_transformations(vmf, fsys, packlist, bsp_file, game_info, studiomdl_loc) if studiomdl_loc is not None and args.propcombine: LOGGER.info('Combining props...')
533
https://:@github.com/garicchi/pyassistant.git
46494b523af213c34a755bfee77fdc0ff00525b1
@@ -31,4 +31,4 @@ class Assistant(): with open(self.setting_file, 'w') as f: json.dump(self.setting,f) - return True \ No newline at end of file + return False \ No newline at end of file
assistant/util/assistant.py
ReplaceText(target='False' @(34,15)->(34,19))
class Assistant(): with open(self.setting_file, 'w') as f: json.dump(self.setting,f) return True \ No newline at end of file \ No newline at end of file
class Assistant(): with open(self.setting_file, 'w') as f: json.dump(self.setting,f) \ No newline at end of file return False \ No newline at end of file
534
https://:@github.com/u0078867/PyBiomech.git
b817b7617d9d57c39c72d8288d8cdb5748b9755c
@@ -272,7 +272,7 @@ def evalPolynomialDerivative(poly, u, der=1): tck, dummy = interpolate.splprep([x.tolist(),x.tolist()], s=0, k=1) xU = np.array(interpolate.splev(u, tck)[1]) out = f2(xU) - p = np.array([np.ones((x.shape[0],)), out]).T + p = np.array([np.ones((xU.shape[0],)), out]).T return p
src/PyBiomech/vtkh.py
ReplaceText(target='xU' @(275,27)->(275,28))
def evalPolynomialDerivative(poly, u, der=1): tck, dummy = interpolate.splprep([x.tolist(),x.tolist()], s=0, k=1) xU = np.array(interpolate.splev(u, tck)[1]) out = f2(xU) p = np.array([np.ones((x.shape[0],)), out]).T return p
def evalPolynomialDerivative(poly, u, der=1): tck, dummy = interpolate.splprep([x.tolist(),x.tolist()], s=0, k=1) xU = np.array(interpolate.splev(u, tck)[1]) out = f2(xU) p = np.array([np.ones((xU.shape[0],)), out]).T return p
535
https://:@github.com/mardix/gokku.git
c44093fc8d160748f4c6902e590ecd2193fdeb90
@@ -509,7 +509,7 @@ def deploy_node(app, deltas={}): first_time = False if not exists(virtualenv_path): echo("-----> Creating virtualenv_path for '{}'".format(app), fg='green') - makedirs(node_path) + makedirs(virtualenv_path) first_time = True if not exists(node_path): echo("-----> Creating node_modules for '{}'".format(app), fg='green')
gokku.py
ReplaceText(target='virtualenv_path' @(512,17)->(512,26))
def deploy_node(app, deltas={}): first_time = False if not exists(virtualenv_path): echo("-----> Creating virtualenv_path for '{}'".format(app), fg='green') makedirs(node_path) first_time = True if not exists(node_path): echo("-----> Creating node_modules for '{}'".format(app), fg='green')
def deploy_node(app, deltas={}): first_time = False if not exists(virtualenv_path): echo("-----> Creating virtualenv_path for '{}'".format(app), fg='green') makedirs(virtualenv_path) first_time = True if not exists(node_path): echo("-----> Creating node_modules for '{}'".format(app), fg='green')
536
https://:@github.com/sophacles/pyCLiFF.git
35bf4ff2714329a786c093d7e24a63e6fc1555b9
@@ -16,7 +16,7 @@ def handle_opts(opts): def datahandler(line): global n - if n > clmax: + if n >= clmax: raise StopIteration n += 1
examples/head.py
ReplaceText(target='>=' @(19,9)->(19,10))
def handle_opts(opts): def datahandler(line): global n if n > clmax: raise StopIteration n += 1
def handle_opts(opts): def datahandler(line): global n if n >= clmax: raise StopIteration n += 1
537
https://:@github.com/pilt/flask-versioned.git
2dd47374531a9426845a2b3b10fa5f2dc2418bf2
@@ -45,7 +45,7 @@ class FileChangedDriver(Driver): mods = time.strftime('%Y%m%dT%H%M%S', modt) return self.format % { 'version': mods, - 'path': path, + 'path': stream, }
flaskext/versioned/__init__.py
ReplaceText(target='stream' @(48,20)->(48,24))
class FileChangedDriver(Driver): mods = time.strftime('%Y%m%dT%H%M%S', modt) return self.format % { 'version': mods, 'path': path, }
class FileChangedDriver(Driver): mods = time.strftime('%Y%m%dT%H%M%S', modt) return self.format % { 'version': mods, 'path': stream, }
538
https://:@github.com/lantunes/netomaton.git
7d73976ac295f05911b9b67e495bd54021f97644
@@ -7,7 +7,7 @@ if __name__ == '__main__': initial_conditions = [0] * 100 + [1] + [0] * 99 - activities, connectivities = evolve(adjacencies, initial_conditions, timesteps=100, + activities, connectivities = evolve(initial_conditions, adjacencies, timesteps=100, activity_rule=lambda n, c, t: ActivityRule.nks_ca_rule(n, c, 30)) plot_grid(activities)
demos/elementary_ca/elementary_ca_demo.py
ArgSwap(idxs=0<->1 @(10,33)->(10,39))
if __name__ == '__main__': initial_conditions = [0] * 100 + [1] + [0] * 99 activities, connectivities = evolve(adjacencies, initial_conditions, timesteps=100, activity_rule=lambda n, c, t: ActivityRule.nks_ca_rule(n, c, 30)) plot_grid(activities)
if __name__ == '__main__': initial_conditions = [0] * 100 + [1] + [0] * 99 activities, connectivities = evolve(initial_conditions, adjacencies, timesteps=100, activity_rule=lambda n, c, t: ActivityRule.nks_ca_rule(n, c, 30)) plot_grid(activities)
539
https://:@github.com/lantunes/netomaton.git
7d73976ac295f05911b9b67e495bd54021f97644
@@ -61,7 +61,7 @@ if __name__ == '__main__': initial_conditions = half_two - activities, connectivities = evolve(hopfield_net.adjacency_matrix, initial_conditions, timesteps=155, + activities, connectivities = evolve(initial_conditions, hopfield_net.adjacency_matrix, timesteps=155, activity_rule=hopfield_net.activity_rule) # view the weights, stored in the adjacency matrix
demos/hopfield_net/hopfield_net_demo.py
ArgSwap(idxs=0<->1 @(64,33)->(64,39))
if __name__ == '__main__': initial_conditions = half_two activities, connectivities = evolve(hopfield_net.adjacency_matrix, initial_conditions, timesteps=155, activity_rule=hopfield_net.activity_rule) # view the weights, stored in the adjacency matrix
if __name__ == '__main__': initial_conditions = half_two activities, connectivities = evolve(initial_conditions, hopfield_net.adjacency_matrix, timesteps=155, activity_rule=hopfield_net.activity_rule) # view the weights, stored in the adjacency matrix
540
https://:@github.com/orionw/tuningDEAP.git
7e7a40a386832fd386dfb8f67ff10e27ade0421e
@@ -96,7 +96,7 @@ class TestConfigMapping(unittest.TestCase): assert value == bool(new_value) and type(value) == bool, "did not set the correct value from the map, expected {} but got {} (which is not the original {})".format("bool", bool(new_value), type(value), value) set_by_path(test_config, path, new_value, is_bool=False) int_value = get_by_path(test_config, path) - assert new_value == int_value and type(int_value) == int, "did not set the correct value from the map, expected type {} = {} but got type {} = {})".format("int", int_value, type(int_value), new_value) + assert value == int_value and type(int_value) == int, "did not set the correct value from the map, expected type {} = {} but got type {} = {})".format("int", int_value, type(int_value), new_value) def test_set_from_map_invalid(self): test_value = "three_levels"
test/test_config_mapping.py
ReplaceText(target='value' @(99,15)->(99,24))
class TestConfigMapping(unittest.TestCase): assert value == bool(new_value) and type(value) == bool, "did not set the correct value from the map, expected {} but got {} (which is not the original {})".format("bool", bool(new_value), type(value), value) set_by_path(test_config, path, new_value, is_bool=False) int_value = get_by_path(test_config, path) assert new_value == int_value and type(int_value) == int, "did not set the correct value from the map, expected type {} = {} but got type {} = {})".format("int", int_value, type(int_value), new_value) def test_set_from_map_invalid(self): test_value = "three_levels"
class TestConfigMapping(unittest.TestCase): assert value == bool(new_value) and type(value) == bool, "did not set the correct value from the map, expected {} but got {} (which is not the original {})".format("bool", bool(new_value), type(value), value) set_by_path(test_config, path, new_value, is_bool=False) int_value = get_by_path(test_config, path) assert value == int_value and type(int_value) == int, "did not set the correct value from the map, expected type {} = {} but got type {} = {})".format("int", int_value, type(int_value), new_value) def test_set_from_map_invalid(self): test_value = "three_levels"
541
https://:@github.com/benjamincrom/scrabble.git
59d26971b75475d0a66aeb03902fda6bfa57a048
@@ -188,7 +188,7 @@ def move_does_not_stack_tiles(letter_list, location_set): # return False def move_is_rack_size_or_less(location_set): - return len(location_set) > config.PLAYER_RACK_SIZE + return len(location_set) <= config.PLAYER_RACK_SIZE # print('Move places greater than seven tiles.') # return False # else:
scrabble_game.py
ReplaceText(target='<=' @(191,29)->(191,30))
def move_does_not_stack_tiles(letter_list, location_set): # return False def move_is_rack_size_or_less(location_set): return len(location_set) > config.PLAYER_RACK_SIZE # print('Move places greater than seven tiles.') # return False # else:
def move_does_not_stack_tiles(letter_list, location_set): # return False def move_is_rack_size_or_less(location_set): return len(location_set) <= config.PLAYER_RACK_SIZE # print('Move places greater than seven tiles.') # return False # else:
542
https://:@bitbucket.org/norok2/pytk.git
934113ff8273afe9c2737a1bdee6f5f298217986
@@ -179,7 +179,7 @@ def center(target, reference=None): geometry = reference.winfo_geometry() else: geometry = reference - if isinstance(reference, str): + if isinstance(geometry, str): geometry = Geometry(geometry) target_geometry = Geometry(target.winfo_geometry()) target.geometry(str(target_geometry.set_to_center(geometry)))
pytk/util.py
ReplaceText(target='geometry' @(182,18)->(182,27))
def center(target, reference=None): geometry = reference.winfo_geometry() else: geometry = reference if isinstance(reference, str): geometry = Geometry(geometry) target_geometry = Geometry(target.winfo_geometry()) target.geometry(str(target_geometry.set_to_center(geometry)))
def center(target, reference=None): geometry = reference.winfo_geometry() else: geometry = reference if isinstance(geometry, str): geometry = Geometry(geometry) target_geometry = Geometry(target.winfo_geometry()) target.geometry(str(target_geometry.set_to_center(geometry)))
543
https://:@github.com/ds4dm/ecole.git
90d17acf7796734b64299619474a60519b19db59
@@ -13,7 +13,7 @@ def test_IsDone(state): def test_NLPIterations(state): reward_func = R.NLPIterations() reward_func.reset(state) - assert reward_func.get(state) >= 0 + assert reward_func.get(state) <= 0 assert reward_func.get(state, done=True) == 0
python/tests/test_reward.py
ReplaceText(target='<=' @(16,34)->(16,36))
def test_IsDone(state): def test_NLPIterations(state): reward_func = R.NLPIterations() reward_func.reset(state) assert reward_func.get(state) >= 0 assert reward_func.get(state, done=True) == 0
def test_IsDone(state): def test_NLPIterations(state): reward_func = R.NLPIterations() reward_func.reset(state) assert reward_func.get(state) <= 0 assert reward_func.get(state, done=True) == 0
544
https://:@github.com/ds4dm/ecole.git
6bc408253cbea5c8cb6740412601d13eb6772039
@@ -101,4 +101,4 @@ def test_LpIterations(model): def test_NNodes(model): reward_func = R.NNodes() reward_func.reset(model) - assert reward_func.obtain_reward(model) <= 0 + assert reward_func.obtain_reward(model) >= 0
python/tests/test_reward.py
ReplaceText(target='>=' @(104,44)->(104,46))
def test_LpIterations(model): def test_NNodes(model): reward_func = R.NNodes() reward_func.reset(model) assert reward_func.obtain_reward(model) <= 0
def test_LpIterations(model): def test_NNodes(model): reward_func = R.NNodes() reward_func.reset(model) assert reward_func.obtain_reward(model) >= 0
545
https://:@github.com/nodedge/nodedge.git
b610ea8a12132e143aafe2824a83a9c032a973f7
@@ -313,7 +313,7 @@ class GraphicsView(QGraphicsView): event.localPos(), event.screenPos(), Qt.LeftButton, - event.buttons() & -Qt.LeftButton, + event.buttons() | -Qt.LeftButton, event.modifiers(), ) super().mouseReleaseEvent(fake_event)
nodedge/graphics_view.py
ReplaceText(target='|' @(316,28)->(316,29))
class GraphicsView(QGraphicsView): event.localPos(), event.screenPos(), Qt.LeftButton, event.buttons() & -Qt.LeftButton, event.modifiers(), ) super().mouseReleaseEvent(fake_event)
class GraphicsView(QGraphicsView): event.localPos(), event.screenPos(), Qt.LeftButton, event.buttons() | -Qt.LeftButton, event.modifiers(), ) super().mouseReleaseEvent(fake_event)
546
https://:@github.com/ovnicraft/suds2.git
b5d1aa94c6c825f717bf41f687ca687241aeae43
@@ -130,7 +130,7 @@ class UMBase: return content.data lang = attributes.lang() if not len(node.children) and content.text is None: - if self.nillable(content.data) and content.node.isnil(): + if self.nillable(content.data) or content.node.isnil(): return None else: return xlstr.string('', lang)
suds/bindings/unmarshaller.py
ReplaceText(target='or' @(133,43)->(133,46))
class UMBase: return content.data lang = attributes.lang() if not len(node.children) and content.text is None: if self.nillable(content.data) and content.node.isnil(): return None else: return xlstr.string('', lang)
class UMBase: return content.data lang = attributes.lang() if not len(node.children) and content.text is None: if self.nillable(content.data) or content.node.isnil(): return None else: return xlstr.string('', lang)
547
https://:@github.com/ovnicraft/suds2.git
e54498f49a9973bc9a385885b5a220026503f22b
@@ -113,6 +113,6 @@ class Options(Skin): Definition('retxml', bool, False), Definition('autoblend', bool, False), Definition('cachingpolicy', int, 0), - Definition('plugins', [], (list, tuple)), + Definition('plugins', (list, tuple), []), ] Skin.__init__(self, domain, definitions, kwargs)
suds/options.py
ArgSwap(idxs=1<->2 @(116,12)->(116,22))
class Options(Skin): Definition('retxml', bool, False), Definition('autoblend', bool, False), Definition('cachingpolicy', int, 0), Definition('plugins', [], (list, tuple)), ] Skin.__init__(self, domain, definitions, kwargs)
class Options(Skin): Definition('retxml', bool, False), Definition('autoblend', bool, False), Definition('cachingpolicy', int, 0), Definition('plugins', (list, tuple), []), ] Skin.__init__(self, domain, definitions, kwargs)
548
https://:@github.com/lukegb/ticketml.git
99c13a8e34a4397470564a355d639a4fbd5c72c4
@@ -122,7 +122,7 @@ class BaseBackend(object): self._serial.write(h2b('1d21' + hex)) def get_characters_per_line(self, font_width): - return self.BASE_CHARS_PER_LINE / font_width + return self.BASE_CHARS_PER_LINE // font_width class Ibm4610Backend(BaseBackend): BARCODE_MAP = {
ticketml/ticketml.py
ReplaceText(target='//' @(125,40)->(125,41))
class BaseBackend(object): self._serial.write(h2b('1d21' + hex)) def get_characters_per_line(self, font_width): return self.BASE_CHARS_PER_LINE / font_width class Ibm4610Backend(BaseBackend): BARCODE_MAP = {
class BaseBackend(object): self._serial.write(h2b('1d21' + hex)) def get_characters_per_line(self, font_width): return self.BASE_CHARS_PER_LINE // font_width class Ibm4610Backend(BaseBackend): BARCODE_MAP = {
549
https://:@github.com/EmanuelGoncalves/crispy.git
2a029a350d328a65b32b5434a6b555dcc94a562c
@@ -60,7 +60,7 @@ def iterate_correction(crispr_file, crispr_lib_file, cnv_file, output_folder, bs assert len(overlap_genes) > 0, 'No genes (rows) overlap between CRISPR and Copy-number matrices' # run correction for each cell line - for sample in overlap_genes: + for sample in overlap_samples: if bsub_flag: print('[{}] Crispy: bsub {}'.format(dt.now().strftime('%Y-%m-%d %H:%M:%S'), sample))
scripts/crispy/processing/correct_cnv_bias.py
ReplaceText(target='overlap_samples' @(63,18)->(63,31))
def iterate_correction(crispr_file, crispr_lib_file, cnv_file, output_folder, bs assert len(overlap_genes) > 0, 'No genes (rows) overlap between CRISPR and Copy-number matrices' # run correction for each cell line for sample in overlap_genes: if bsub_flag: print('[{}] Crispy: bsub {}'.format(dt.now().strftime('%Y-%m-%d %H:%M:%S'), sample))
def iterate_correction(crispr_file, crispr_lib_file, cnv_file, output_folder, bs assert len(overlap_genes) > 0, 'No genes (rows) overlap between CRISPR and Copy-number matrices' # run correction for each cell line for sample in overlap_samples: if bsub_flag: print('[{}] Crispy: bsub {}'.format(dt.now().strftime('%Y-%m-%d %H:%M:%S'), sample))
550
https://:@github.com/ninapavlich/sitecomber-article-tests.git
a053bd33aa15bf1c6e4a2dded9e5e1a71511e9bc
@@ -159,4 +159,4 @@ def check_spelling(page, settings): message = "No misspellings found" if not found_misspellings else u'Found %s misspelling(s): "%s"' % (len(misspelled), '", "'.join(misspelled)) return found_misspellings, message - return True, 'No article found' + return False, 'No article found'
sitecomber_article_tests/utils.py
ReplaceText(target='False' @(162,11)->(162,15))
def check_spelling(page, settings): message = "No misspellings found" if not found_misspellings else u'Found %s misspelling(s): "%s"' % (len(misspelled), '", "'.join(misspelled)) return found_misspellings, message return True, 'No article found'
def check_spelling(page, settings): message = "No misspellings found" if not found_misspellings else u'Found %s misspelling(s): "%s"' % (len(misspelled), '", "'.join(misspelled)) return found_misspellings, message return False, 'No article found'
551
https://:@github.com/salpreh/tablat.git
fef6e07276bced6e885653ef760884a0ee5e0606
@@ -132,7 +132,7 @@ class Table(object): end = row_lenght filt_data = [] - while end < len(self._table_data): + while end <= len(self._table_data): filt_data.extend(self._filter_list(self._table_data[start:end], mask)) start = end end += row_lenght
tablat/Table.py
ReplaceText(target='<=' @(135,18)->(135,19))
class Table(object): end = row_lenght filt_data = [] while end < len(self._table_data): filt_data.extend(self._filter_list(self._table_data[start:end], mask)) start = end end += row_lenght
class Table(object): end = row_lenght filt_data = [] while end <= len(self._table_data): filt_data.extend(self._filter_list(self._table_data[start:end], mask)) start = end end += row_lenght
552
https://:@github.com/cloudshare/cloudshare-py-sdk.git
1363279f77267f5e18ec26f2d1bd345b3adea08e
@@ -156,7 +156,7 @@ def request(method, path, queryParams=None, content=None): path=path, queryParams=queryParams, content=content) - if res.status / 100 != 2: + if res.status // 100 != 2: raise Exception('{} {}'.format(res.status, res.content['message'])) return res.content
example.py
ReplaceText(target='//' @(159,18)->(159,19))
def request(method, path, queryParams=None, content=None): path=path, queryParams=queryParams, content=content) if res.status / 100 != 2: raise Exception('{} {}'.format(res.status, res.content['message'])) return res.content
def request(method, path, queryParams=None, content=None): path=path, queryParams=queryParams, content=content) if res.status // 100 != 2: raise Exception('{} {}'.format(res.status, res.content['message'])) return res.content
553
https://:@github.com/Oslandia/deeposlandia.git
529dd916303d712ea3c51bb8454349812537e506
@@ -111,7 +111,7 @@ if __name__ == '__main__': testing_dataset.load(testing_filename, args.nb_testing_image) else: input_image_dir = os.path.join(input_repo, "testing") - testing_dataset.populate(input_image_dir, preprocessed_testing_path, + testing_dataset.populate(preprocessed_testing_path, input_image_dir, nb_images=args.nb_testing_image, labelling=False) testing_dataset.save(testing_filename)
sources/test.py
ArgSwap(idxs=0<->1 @(114,8)->(114,32))
if __name__ == '__main__': testing_dataset.load(testing_filename, args.nb_testing_image) else: input_image_dir = os.path.join(input_repo, "testing") testing_dataset.populate(input_image_dir, preprocessed_testing_path, nb_images=args.nb_testing_image, labelling=False) testing_dataset.save(testing_filename)
if __name__ == '__main__': testing_dataset.load(testing_filename, args.nb_testing_image) else: input_image_dir = os.path.join(input_repo, "testing") testing_dataset.populate(preprocessed_testing_path, input_image_dir, nb_images=args.nb_testing_image, labelling=False) testing_dataset.save(testing_filename)
554
https://:@github.com/Oslandia/deeposlandia.git
2bc626291e2f069c68e4a5a43e6ef5bfc3059d6a
@@ -158,7 +158,7 @@ class SemanticSegmentationModel(ConvolutionalNeuralNetwork): name=dataset_type+"_images") label_filepaths = [dataset.image_info[i]["label_filename"] for i in range(dataset.get_nb_images())] - label_tensors = ops.convert_to_tensor(image_filepaths, dtype=tf.string, + label_tensors = ops.convert_to_tensor(label_filepaths, dtype=tf.string, name=dataset_type+"_labels") input_queue = tf.train.slice_input_producer([image_tensors, label_tensors],
sources/semantic_segmentation.py
ReplaceText(target='label_filepaths' @(161,50)->(161,65))
class SemanticSegmentationModel(ConvolutionalNeuralNetwork): name=dataset_type+"_images") label_filepaths = [dataset.image_info[i]["label_filename"] for i in range(dataset.get_nb_images())] label_tensors = ops.convert_to_tensor(image_filepaths, dtype=tf.string, name=dataset_type+"_labels") input_queue = tf.train.slice_input_producer([image_tensors, label_tensors],
class SemanticSegmentationModel(ConvolutionalNeuralNetwork): name=dataset_type+"_images") label_filepaths = [dataset.image_info[i]["label_filename"] for i in range(dataset.get_nb_images())] label_tensors = ops.convert_to_tensor(label_filepaths, dtype=tf.string, name=dataset_type+"_labels") input_queue = tf.train.slice_input_producer([image_tensors, label_tensors],
555
https://:@github.com/Oslandia/deeposlandia.git
09e5206460eae73b533e9007ca5241bd5baee7f6
@@ -449,7 +449,7 @@ def main(args): "semseg", "predicted_geometries", ) - os.makedirs(predicted_label_folder, exist_ok=True) + os.makedirs(predicted_geom_folder, exist_ok=True) predicted_geom_file = os.path.join( predicted_geom_folder, args.image_basename + "_" + str(args.image_size) + ".geojson",
deeposlandia/postprocess.py
ReplaceText(target='predicted_geom_folder' @(452,16)->(452,38))
def main(args): "semseg", "predicted_geometries", ) os.makedirs(predicted_label_folder, exist_ok=True) predicted_geom_file = os.path.join( predicted_geom_folder, args.image_basename + "_" + str(args.image_size) + ".geojson",
def main(args): "semseg", "predicted_geometries", ) os.makedirs(predicted_geom_folder, exist_ok=True) predicted_geom_file = os.path.join( predicted_geom_folder, args.image_basename + "_" + str(args.image_size) + ".geojson",
556
https://:@github.com/metal3d/keras-video-generators.git
7313aa497a80147714dda3581ce34d4821130ae1
@@ -72,7 +72,7 @@ class VideoFrameGenerator(Sequence): # split factor should be a propoer value if split is not None: - assert 0.0 > split < 1.0 + assert 0.0 < split < 1.0 # be sure that classes are well ordered classes.sort()
src/keras_video/generator.py
ReplaceText(target='<' @(75,23)->(75,24))
class VideoFrameGenerator(Sequence): # split factor should be a propoer value if split is not None: assert 0.0 > split < 1.0 # be sure that classes are well ordered classes.sort()
class VideoFrameGenerator(Sequence): # split factor should be a propoer value if split is not None: assert 0.0 < split < 1.0 # be sure that classes are well ordered classes.sort()
557
https://:@github.com/yongzhuo/Macropodus.git
11789503e9122303464bf6f02df1122397c0ca97
@@ -142,7 +142,7 @@ class RandomEmbedding(BaseEmbedding): else: raise RuntimeError("your input level_type is wrong, it must be 'word', 'char', 'ngram'") for text_one in text: - if term_one not in token2idx: + if text_one not in token2idx: token2idx[text_one] = len(token2idx) else: raise RuntimeError("your input corpus_path is wrong, it must be 'dict' or 'corpus'")
macropodus/network/base/embedding.py
ReplaceText(target='text_one' @(145,27)->(145,35))
class RandomEmbedding(BaseEmbedding): else: raise RuntimeError("your input level_type is wrong, it must be 'word', 'char', 'ngram'") for text_one in text: if term_one not in token2idx: token2idx[text_one] = len(token2idx) else: raise RuntimeError("your input corpus_path is wrong, it must be 'dict' or 'corpus'")
class RandomEmbedding(BaseEmbedding): else: raise RuntimeError("your input level_type is wrong, it must be 'word', 'char', 'ngram'") for text_one in text: if text_one not in token2idx: token2idx[text_one] = len(token2idx) else: raise RuntimeError("your input corpus_path is wrong, it must be 'dict' or 'corpus'")
558
https://:@github.com/foxkit-us/PyIRC.git
c0bcbffa66f6fe1ddb7f3002e85fa38bfaafa103
@@ -156,7 +156,7 @@ class IRCString(UserString): def convert(self, case): """Convert string into another caseform""" - return IRCString(self, case) + return IRCString(case, self) def ascii_lower(self): """Return a copy of the string S converted to lowercase, using ASCII
PyIRC/casemapping.py
ArgSwap(idxs=0<->1 @(159,15)->(159,24))
class IRCString(UserString): def convert(self, case): """Convert string into another caseform""" return IRCString(self, case) def ascii_lower(self): """Return a copy of the string S converted to lowercase, using ASCII
class IRCString(UserString): def convert(self, case): """Convert string into another caseform""" return IRCString(case, self) def ascii_lower(self): """Return a copy of the string S converted to lowercase, using ASCII
559
https://:@github.com/foxkit-us/PyIRC.git
b262e84b066a39cd911a37c5f3ab517413ed707f
@@ -332,7 +332,7 @@ class UserTrack(BaseExtension): basicrfc = self.get_extension("BasicRFC") - if self.casecmp(user.nick, basicrfc.nick): + if self.casecmp(target.nick, basicrfc.nick): # It's us! isupport = self.get_extension("ISupport")
PyIRC/extensions/usertrack.py
ReplaceText(target='target' @(335,24)->(335,28))
class UserTrack(BaseExtension): basicrfc = self.get_extension("BasicRFC") if self.casecmp(user.nick, basicrfc.nick): # It's us! isupport = self.get_extension("ISupport")
class UserTrack(BaseExtension): basicrfc = self.get_extension("BasicRFC") if self.casecmp(target.nick, basicrfc.nick): # It's us! isupport = self.get_extension("ISupport")
560
https://:@github.com/foxkit-us/PyIRC.git
71fa54a10227f629b88a2345a1da19b7db02b862
@@ -48,7 +48,7 @@ class NullSocket(IRCBase):
     def inject_line(self, line):
         """Inject a Line into the recvq for the client."""
-        assert isinstance(Line, line)
+        assert isinstance(line, Line)
         self.recvq.put(line)
     def loop(self):
PyIRC/io/null.py
ArgSwap(idxs=0<->1 @(51,15)->(51,25))
class NullSocket(IRCBase): def inject_line(self, line): """Inject a Line into the recvq for the client.""" assert isinstance(Line, line) self.recvq.put(line) def loop(self):
class NullSocket(IRCBase): def inject_line(self, line): """Inject a Line into the recvq for the client.""" assert isinstance(line, Line) self.recvq.put(line) def loop(self):
561
https://:@github.com/foxkit-us/PyIRC.git
bf102eab33bde57ba3ae1b240a84ee2d161103f0
@@ -362,7 +362,7 @@ class XTerm16ColourFormatter(ANSIFormatter):
             if self.background is not None:
                 bgc = ColoursANSI[self.background.name].value
-                ret.append(str(fgc.background_16))
+                ret.append(str(bgc.background_16))
             else:
                 # Reset background just in case
                 ret.append(self.fmt_resetbackground)
PyIRC/formatting/formatters.py
ReplaceText(target='bgc' @(365,31)->(365,34))
class XTerm16ColourFormatter(ANSIFormatter): if self.background is not None: bgc = ColoursANSI[self.background.name].value ret.append(str(fgc.background_16)) else: # Reset background just in case ret.append(self.fmt_resetbackground)
class XTerm16ColourFormatter(ANSIFormatter): if self.background is not None: bgc = ColoursANSI[self.background.name].value ret.append(str(bgc.background_16)) else: # Reset background just in case ret.append(self.fmt_resetbackground)
562
https://:@bitbucket.org/pixelforest/pixelforest_drf.git
71155a01605838be2123eae28b650ceae32d7c2c
@@ -14,7 +14,7 @@ from ..permissions import FullDjangoModelPermissions
 User = get_user_model()
-if User is PFUser:
+if User is not PFUser:
     raise ImproperlyConfigured("Pf User is not the User model")
pixelforest_drf/rest/users/api_views.py
ReplaceText(target=' is not ' @(17,7)->(17,11))
from ..permissions import FullDjangoModelPermissions User = get_user_model() if User is PFUser: raise ImproperlyConfigured("Pf User is not the User model")
from ..permissions import FullDjangoModelPermissions User = get_user_model() if User is not PFUser: raise ImproperlyConfigured("Pf User is not the User model")
563
https://:@github.com/sphemakh/meerkathi.git
b6220f225f989604f184d10863e5012f215ed639
@@ -65,7 +65,7 @@ class worker_administrator(object):
             else:
                 worker = name + '_worker'
-            self.workers.append((name, worker, order))
+            self.workers.append((name, worker, i))
         self.workers = sorted(self.workers, key=lambda a: a[2])
meerkathi/workers/worker_administrator.py
ReplaceText(target='i' @(68,47)->(68,52))
class worker_administrator(object): else: worker = name + '_worker' self.workers.append((name, worker, order)) self.workers = sorted(self.workers, key=lambda a: a[2])
class worker_administrator(object): else: worker = name + '_worker' self.workers.append((name, worker, i)) self.workers = sorted(self.workers, key=lambda a: a[2])
564
https://:@github.com/sphemakh/meerkathi.git
8bb75b04fe481c06cf0e7987e0072ed58fea1bc3
@@ -528,7 +528,7 @@ found in our database or in the CASA NRAO database'.format(field))
                 step = 'plot_fluxscale_{0:d}'.format(i)
                 table = prefix+".F0"
                 fieldtoplot = []
-                fieldtoplot.append(utils.get_field_id(msinfo, ref)[0])
+                fieldtoplot.append(utils.get_field_id(msinfo, trans)[0])
                 recipe.add('cab/ragavi', step,
                     {
                     "table" : '{0:s}/{1:s}:{2:s}'.format(get_dir_path(pipeline.caltables, pipeline), table, 'output'),
meerkathi/workers/cross_cal_worker.py
ReplaceText(target='trans' @(531,62)->(531,65))
found in our database or in the CASA NRAO database'.format(field)) step = 'plot_fluxscale_{0:d}'.format(i) table = prefix+".F0" fieldtoplot = [] fieldtoplot.append(utils.get_field_id(msinfo, ref)[0]) recipe.add('cab/ragavi', step, { "table" : '{0:s}/{1:s}:{2:s}'.format(get_dir_path(pipeline.caltables, pipeline), table, 'output'),
found in our database or in the CASA NRAO database'.format(field)) step = 'plot_fluxscale_{0:d}'.format(i) table = prefix+".F0" fieldtoplot = [] fieldtoplot.append(utils.get_field_id(msinfo, trans)[0]) recipe.add('cab/ragavi', step, { "table" : '{0:s}/{1:s}:{2:s}'.format(get_dir_path(pipeline.caltables, pipeline), table, 'output'),
565
https://:@github.com/sphemakh/meerkathi.git
67485769492da55875da1fea491a7a32ce82d16a
@@ -176,7 +176,7 @@ def worker(pipeline, recipe, config):
                 substep = 'delete_flag_versions_after_{0:s}_ms{1:d}'.format(version, target_iter)
                 manflags.delete_cflags(pipeline, recipe,
                     available_flagversions[available_flagversions.index(version)+1],
-                        msname, cab_name=substep)
+                        fms, cab_name=substep)
                 flagv = tms+'.flagversions'
caracal/workers/transform_worker.py
ReplaceText(target='fms' @(179,24)->(179,30))
def worker(pipeline, recipe, config): substep = 'delete_flag_versions_after_{0:s}_ms{1:d}'.format(version, target_iter) manflags.delete_cflags(pipeline, recipe, available_flagversions[available_flagversions.index(version)+1], msname, cab_name=substep) flagv = tms+'.flagversions'
def worker(pipeline, recipe, config): substep = 'delete_flag_versions_after_{0:s}_ms{1:d}'.format(version, target_iter) manflags.delete_cflags(pipeline, recipe, available_flagversions[available_flagversions.index(version)+1], fms, cab_name=substep) flagv = tms+'.flagversions'
566
https://:@gitlab.com/nekokatt/hikari.git
0b4d26aa20ddbf580ad84581bc5eeea34c687896
@@ -48,4 +48,4 @@ class TestReaction:
         assert re.count == 420
         assert re.me is True
-        test_state.parse_emoji.assert_called_with(None, emoji_dict)
+        test_state.parse_emoji.assert_called_with(emoji_dict, None)
tests/hikari/core/model/test_reaction.py
ArgSwap(idxs=0<->1 @(51,8)->(51,49))
class TestReaction: assert re.count == 420 assert re.me is True test_state.parse_emoji.assert_called_with(None, emoji_dict)
class TestReaction: assert re.count == 420 assert re.me is True test_state.parse_emoji.assert_called_with(emoji_dict, None)
567
https://:@gitlab.com/nekokatt/hikari.git
b40bd61096cbd597d0f2863351ebddebf571867d
@@ -286,7 +286,7 @@ class DispatchingEventAdapterImpl(dispatching_event_adapter.DispatchingEventAdap
             for role_id in role_ids:
                 role_obj = self.fabric.state_registry.get_role_by_id(guild_id, role_id)
-                if role_objs is not None:
+                if role_obj is not None:
                     role_objs.append(role_obj)
                 else:
                     self.logger.warning(
hikari/orm/dispatching_event_adapter_impl.py
ReplaceText(target='role_obj' @(289,19)->(289,28))
class DispatchingEventAdapterImpl(dispatching_event_adapter.DispatchingEventAdap for role_id in role_ids: role_obj = self.fabric.state_registry.get_role_by_id(guild_id, role_id) if role_objs is not None: role_objs.append(role_obj) else: self.logger.warning(
class DispatchingEventAdapterImpl(dispatching_event_adapter.DispatchingEventAdap for role_id in role_ids: role_obj = self.fabric.state_registry.get_role_by_id(guild_id, role_id) if role_obj is not None: role_objs.append(role_obj) else: self.logger.warning(
568
https://:@gitlab.com/nekokatt/hikari.git
89f2070c7a2135162e4852a8b778490b4695bc78
@@ -699,7 +699,7 @@ class BotAppImpl(bot.IBotApp):
             if plat == "win32":
                 supports_color |= os.getenv("TERM_PROGRAM", None) == "mintty"
                 supports_color |= "ANSICON" in os.environ
-                supports_color |= is_a_tty
+                supports_color &= is_a_tty
             else:
                 supports_color = is_a_tty
hikari/impl/bot.py
ReplaceText(target='&=' @(702,31)->(702,33))
class BotAppImpl(bot.IBotApp): if plat == "win32": supports_color |= os.getenv("TERM_PROGRAM", None) == "mintty" supports_color |= "ANSICON" in os.environ supports_color |= is_a_tty else: supports_color = is_a_tty
class BotAppImpl(bot.IBotApp): if plat == "win32": supports_color |= os.getenv("TERM_PROGRAM", None) == "mintty" supports_color |= "ANSICON" in os.environ supports_color &= is_a_tty else: supports_color = is_a_tty
569
https://:@github.com/matthewhanson/boto3-utils.git
23390e1f08fda449451266dc7136a89914e00b4e
@@ -33,7 +33,7 @@ class s3(object):
         _url = deepcopy(url)
         if url[0:5] == 'https':
             _url = cls.https_to_s3(url)
-        if url[0:5] != 's3://':
+        if _url[0:5] != 's3://':
             raise Exception('Invalid S3 url %s' % _url)
         url_obj = _url.replace('s3://', '').split('/')
boto3utils/s3.py
ReplaceText(target='_url' @(36,11)->(36,14))
class s3(object): _url = deepcopy(url) if url[0:5] == 'https': _url = cls.https_to_s3(url) if url[0:5] != 's3://': raise Exception('Invalid S3 url %s' % _url) url_obj = _url.replace('s3://', '').split('/')
class s3(object): _url = deepcopy(url) if url[0:5] == 'https': _url = cls.https_to_s3(url) if _url[0:5] != 's3://': raise Exception('Invalid S3 url %s' % _url) url_obj = _url.replace('s3://', '').split('/')
570
https://:@github.com/spex-xray/pyspextools.git
c68beb5206e3525010e0265f2c18f8f5fc4bebc3
@@ -131,7 +131,7 @@ class OGIPRegion(Region):
         if isinstance(corr, Pha):
             self.corr = corr
             self.input_corr = True
-        elif back is None:
+        elif corr is None:
             self.input_corr = False
         else:
             self.input_corr = False
pyspextools/io/ogip.py
ReplaceText(target='corr' @(134,13)->(134,17))
class OGIPRegion(Region): if isinstance(corr, Pha): self.corr = corr self.input_corr = True elif back is None: self.input_corr = False else: self.input_corr = False
class OGIPRegion(Region): if isinstance(corr, Pha): self.corr = corr self.input_corr = True elif corr is None: self.input_corr = False else: self.input_corr = False
571
https://:@github.com/djerbic/xlrd-ignore-writeaccess-corruption.git
8ce449208ac3a8a1f4ec9c72954c11afcd40d3a8
@@ -1975,7 +1975,7 @@ class Sheet(BaseObject):
             nchars = data2_len - 1
             if nb:
                 assert nchars % 2 == 0
-                nchars /= 2
+                nchars //= 2
             utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars)
             assert endpos == data2_len
             o.text += utext
xlrd/sheet.py
ReplaceText(target='//=' @(1978,23)->(1978,25))
class Sheet(BaseObject): nchars = data2_len - 1 if nb: assert nchars % 2 == 0 nchars /= 2 utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars) assert endpos == data2_len o.text += utext
class Sheet(BaseObject): nchars = data2_len - 1 if nb: assert nchars % 2 == 0 nchars //= 2 utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars) assert endpos == data2_len o.text += utext
572
https://:@github.com/ijiraq/daomop.git
2b9c4af87e1b899b7589cfb04aa272540d2e8a04
@@ -61,7 +61,7 @@ class GeneralModelTest(FileReadingTestCase, DirectoryCleaningTestCase):
         # Put a real fits image on the first source, first observation
         apcor_str = "4 15 0.19 0.01"
         with open(self.get_abs_path(path), "rb") as fh:
-            self.first_image = DownloadedFitsImage(fh.read(), apcor_str, Mock(), in_memory=True)
+            self.first_image = DownloadedFitsImage(fh.read(), Mock(), apcor_str, in_memory=True)
         first_reading = self.model.get_current_workunit().get_sources()[0].get_readings()[0]
         self.model._on_image_loaded(first_reading, self.first_image)
src/ossos-pipeline/tests/test_integration/test_models.py
ArgSwap(idxs=1<->2 @(64,31)->(64,50))
class GeneralModelTest(FileReadingTestCase, DirectoryCleaningTestCase): # Put a real fits image on the first source, first observation apcor_str = "4 15 0.19 0.01" with open(self.get_abs_path(path), "rb") as fh: self.first_image = DownloadedFitsImage(fh.read(), apcor_str, Mock(), in_memory=True) first_reading = self.model.get_current_workunit().get_sources()[0].get_readings()[0] self.model._on_image_loaded(first_reading, self.first_image)
class GeneralModelTest(FileReadingTestCase, DirectoryCleaningTestCase): # Put a real fits image on the first source, first observation apcor_str = "4 15 0.19 0.01" with open(self.get_abs_path(path), "rb") as fh: self.first_image = DownloadedFitsImage(fh.read(), Mock(), apcor_str, in_memory=True) first_reading = self.model.get_current_workunit().get_sources()[0].get_readings()[0] self.model._on_image_loaded(first_reading, self.first_image)
573
https://:@github.com/iamsteadman/bambu-buffer.git
5704abdda5297d6cde2ccac7f91ded4108e1f616
@@ -12,7 +12,7 @@ def post_save_receiver(sender, instance, **kwargs):
         name = m.pop(0)
         app, model = name.lower().split('.')
-        if app != instance._meta.app_label and model != instance._meta.module_name:
+        if app != instance._meta.app_label or model != instance._meta.module_name:
             continue
         if any(m):
bambu_buffer/receivers.py
ReplaceText(target='or' @(15,43)->(15,46))
def post_save_receiver(sender, instance, **kwargs): name = m.pop(0) app, model = name.lower().split('.') if app != instance._meta.app_label and model != instance._meta.module_name: continue if any(m):
def post_save_receiver(sender, instance, **kwargs): name = m.pop(0) app, model = name.lower().split('.') if app != instance._meta.app_label or model != instance._meta.module_name: continue if any(m):
574
https://:@github.com/khllkcm/templot.git
d4b329d16f94e3808e54b05116a8545e987a5e7b
@@ -95,7 +95,7 @@ def plot_aggregated_map(
     if aggregation_method not in aggregates:
         raise ValueError(
-            f"{group} is not a valid aggregation method. Possible values are: {', '.join([k for k in aggregates])}"
+            f"{aggregation_method} is not a valid aggregation method. Possible values are: {', '.join([k for k in aggregates])}"
         )
     map_data = aggregates[aggregation_method][variables]
templot/plot_aggregated_map.py
ReplaceText(target='aggregation_method' @(98,15)->(98,20))
def plot_aggregated_map( if aggregation_method not in aggregates: raise ValueError( f"{group} is not a valid aggregation method. Possible values are: {', '.join([k for k in aggregates])}" ) map_data = aggregates[aggregation_method][variables]
def plot_aggregated_map( if aggregation_method not in aggregates: raise ValueError( f"{aggregation_method} is not a valid aggregation method. Possible values are: {', '.join([k for k in aggregates])}" ) map_data = aggregates[aggregation_method][variables]
575
https://:@github.com/cjneely10/BioMetaDB.git
71352d2e75c038af886007c7b82591896aa23e1c
@@ -146,4 +146,4 @@ def create_database(db_name, table_name, directory_name, data_file, alias, silen
     if not silent:
         _initialization_display_message_epilogue()
     if not integrity_cancel:
-        integrity_check(directory_name, table_name, "None", silent)
+        integrity_check(db_name, table_name, "None", silent)
BioMetaDB/DBOperations/create_database.py
ReplaceText(target='db_name' @(149,24)->(149,38))
def create_database(db_name, table_name, directory_name, data_file, alias, silen if not silent: _initialization_display_message_epilogue() if not integrity_cancel: integrity_check(directory_name, table_name, "None", silent)
def create_database(db_name, table_name, directory_name, data_file, alias, silen if not silent: _initialization_display_message_epilogue() if not integrity_cancel: integrity_check(db_name, table_name, "None", silent)
576
https://:@github.com/lsst-sqre/kubespawner.git
5f0ca5f9734552b4c40563828899316d2b69156c
@@ -361,7 +361,7 @@ class KubeSpawner(Spawner):
             # shut down to complete
             while True:
                 data = yield self.get_pod_info(self.pod_name)
-                if data is not None:
+                if data is None:
                     break
                 time.sleep(5)
kubespawner/spawner.py
ReplaceText(target=' is ' @(364,23)->(364,31))
class KubeSpawner(Spawner): # shut down to complete while True: data = yield self.get_pod_info(self.pod_name) if data is not None: break time.sleep(5)
class KubeSpawner(Spawner): # shut down to complete while True: data = yield self.get_pod_info(self.pod_name) if data is None: break time.sleep(5)
577
https://:@github.com/lsst-sqre/kubespawner.git
df9936a785e34c898ffd065f17697a0035cf310c
@@ -404,7 +404,7 @@ class KubeSpawner(Spawner):
     @gen.coroutine
     def start(self):
         pvc_data = get_pvc_info(self.pvc_name)
-        if pvc_data is not None:
+        if pvc_data is None:
             pvc_manifest = self.get_pvc_manifest()
             yield self.httpclient.fetch(self.request(
                 url=k8s_url(self.namespace, 'persistentvolumeclaims'),
kubespawner/spawner.py
ReplaceText(target=' is ' @(407,19)->(407,27))
class KubeSpawner(Spawner): @gen.coroutine def start(self): pvc_data = get_pvc_info(self.pvc_name) if pvc_data is not None: pvc_manifest = self.get_pvc_manifest() yield self.httpclient.fetch(self.request( url=k8s_url(self.namespace, 'persistentvolumeclaims'),
class KubeSpawner(Spawner): @gen.coroutine def start(self): pvc_data = get_pvc_info(self.pvc_name) if pvc_data is None: pvc_manifest = self.get_pvc_manifest() yield self.httpclient.fetch(self.request( url=k8s_url(self.namespace, 'persistentvolumeclaims'),
578
https://:@github.com/d-meiser/cold-atoms.git
8235e3bc1d9cc9b1bbfe27948d24adc48db33f06
@@ -48,5 +48,5 @@ def bend_kick(dt, Bz, ensemble, forces, reference_impl=False):
         f = np.zeros_like(ensemble.v)
         for force in forces:
             force.force(dt, ensemble, f)
-        ensemble.v *= f / m
+        ensemble.v += f / m
         updater(0.5 * dt, omegaB, ensemble.x, ensemble.v)
src/coldatoms/bend_kick.py
ReplaceText(target='+=' @(51,19)->(51,21))
def bend_kick(dt, Bz, ensemble, forces, reference_impl=False): f = np.zeros_like(ensemble.v) for force in forces: force.force(dt, ensemble, f) ensemble.v *= f / m updater(0.5 * dt, omegaB, ensemble.x, ensemble.v)
def bend_kick(dt, Bz, ensemble, forces, reference_impl=False): f = np.zeros_like(ensemble.v) for force in forces: force.force(dt, ensemble, f) ensemble.v += f / m updater(0.5 * dt, omegaB, ensemble.x, ensemble.v)
579
https://:@github.com/silvacms/silva.pas.base.git
d0e1b5196420a6a1359fc5f45d220d172f7a1e01
@@ -41,7 +41,7 @@ class SilvaCascadingPASPlugin(SearchPrincipalsPlugin):
         for authenticator_id, auth in authenticators:
             try:
                 info = auth.authenticateCredentials(credentials)
-                if info is not None and info[0] is None:
+                if info is not None and info[0] is not None:
                     # Failed login can be None OR (None, None)
                     return info
             except _SWALLOWABLE_PLUGIN_EXCEPTIONS:
src/silva/pas/base/plugins/cascading.py
ReplaceText(target=' is not ' @(44,47)->(44,51))
class SilvaCascadingPASPlugin(SearchPrincipalsPlugin): for authenticator_id, auth in authenticators: try: info = auth.authenticateCredentials(credentials) if info is not None and info[0] is None: # Failed login can be None OR (None, None) return info except _SWALLOWABLE_PLUGIN_EXCEPTIONS:
class SilvaCascadingPASPlugin(SearchPrincipalsPlugin): for authenticator_id, auth in authenticators: try: info = auth.authenticateCredentials(credentials) if info is not None and info[0] is not None: # Failed login can be None OR (None, None) return info except _SWALLOWABLE_PLUGIN_EXCEPTIONS:
580
https://:@github.com/jgolob/maliampi.git
dd0a5bf78106e083005db0e7bd21f1122e63f162
@@ -239,7 +239,7 @@ class Workflow_Placement(sl.WorkflowTask):
         lpca = self.new_task(
             'calculate_lpca',
             Jplace_PCA,
-            containerinfo=long_containerinfo,
+            containerinfo=highmem_containerinfo,
             path=os.path.join(
                 self.destination_dir,
                 'placement',
maliampi/subcommands/placement.py
ReplaceText(target='highmem_containerinfo' @(242,26)->(242,44))
class Workflow_Placement(sl.WorkflowTask): lpca = self.new_task( 'calculate_lpca', Jplace_PCA, containerinfo=long_containerinfo, path=os.path.join( self.destination_dir, 'placement',
class Workflow_Placement(sl.WorkflowTask): lpca = self.new_task( 'calculate_lpca', Jplace_PCA, containerinfo=highmem_containerinfo, path=os.path.join( self.destination_dir, 'placement',
581
https://:@github.com/jgolob/maliampi.git
cdea28dbb12093af0165e983149da396b69fe33a
@@ -115,7 +115,7 @@ class Workflow_DADA2(sl.WorkflowTask):
             batch_errModels[batch] = self.new_task(
                 'dada2_learn_error_batch_{}'.format(batch),
                 DADA2_LearnError,
-                containerinfo=heavy_containerinfo,
+                containerinfo=midcpu_containerinfo,
                 batch=batch,
                 tar_reads=False,
                 path=os.path.join(
maliampi/subcommands/sv_dada2.py
ReplaceText(target='midcpu_containerinfo' @(118,30)->(118,49))
class Workflow_DADA2(sl.WorkflowTask): batch_errModels[batch] = self.new_task( 'dada2_learn_error_batch_{}'.format(batch), DADA2_LearnError, containerinfo=heavy_containerinfo, batch=batch, tar_reads=False, path=os.path.join(
class Workflow_DADA2(sl.WorkflowTask): batch_errModels[batch] = self.new_task( 'dada2_learn_error_batch_{}'.format(batch), DADA2_LearnError, containerinfo=midcpu_containerinfo, batch=batch, tar_reads=False, path=os.path.join(
582
https://:@github.com/jgolob/maliampi.git
3cde81cdd2b908b7edd44a000986744301092fae
@@ -203,7 +203,7 @@ class Workflow_Classify(sl.WorkflowTask):
         placement_db_classified = self.new_task(
             'classify_into_placement_db',
             PlacementDB_Classify_SV,
-            containerinfo=heavy_containerinfo,
+            containerinfo=midcpu_containerinfo,
         )
         placement_db_classified.in_placement_db = placement_db_w_si.out_placement_db
         placement_db_classified.in_refpkg_tgz = refpkg_tgz.out_refpkg_tgz
maliampi/subcommands/classify.py
ReplaceText(target='midcpu_containerinfo' @(206,26)->(206,45))
class Workflow_Classify(sl.WorkflowTask): placement_db_classified = self.new_task( 'classify_into_placement_db', PlacementDB_Classify_SV, containerinfo=heavy_containerinfo, ) placement_db_classified.in_placement_db = placement_db_w_si.out_placement_db placement_db_classified.in_refpkg_tgz = refpkg_tgz.out_refpkg_tgz
class Workflow_Classify(sl.WorkflowTask): placement_db_classified = self.new_task( 'classify_into_placement_db', PlacementDB_Classify_SV, containerinfo=midcpu_containerinfo, ) placement_db_classified.in_placement_db = placement_db_w_si.out_placement_db placement_db_classified.in_refpkg_tgz = refpkg_tgz.out_refpkg_tgz
583
https://:@github.com/jgolob/maliampi.git
f50cfcea1e390ae5eb05de0c116c2251a09f4864
@@ -203,7 +203,7 @@ class Workflow_Classify(sl.WorkflowTask):
         placement_db_classified = self.new_task(
             'classify_into_placement_db',
             PlacementDB_Classify_SV,
-            containerinfo=midcpu_containerinfo,
+            containerinfo=highmem_containerinfo,
         )
         placement_db_classified.in_placement_db = placement_db_w_si.out_placement_db
         placement_db_classified.in_refpkg_tgz = refpkg_tgz.out_refpkg_tgz
maliampi/subcommands/classify.py
ReplaceText(target='highmem_containerinfo' @(206,26)->(206,46))
class Workflow_Classify(sl.WorkflowTask): placement_db_classified = self.new_task( 'classify_into_placement_db', PlacementDB_Classify_SV, containerinfo=midcpu_containerinfo, ) placement_db_classified.in_placement_db = placement_db_w_si.out_placement_db placement_db_classified.in_refpkg_tgz = refpkg_tgz.out_refpkg_tgz
class Workflow_Classify(sl.WorkflowTask): placement_db_classified = self.new_task( 'classify_into_placement_db', PlacementDB_Classify_SV, containerinfo=highmem_containerinfo, ) placement_db_classified.in_placement_db = placement_db_w_si.out_placement_db placement_db_classified.in_refpkg_tgz = refpkg_tgz.out_refpkg_tgz
584
https://:@github.com/d3rp/clima.git
92eced31f43fd49f87e6af8582066a25fc0ba726
@@ -46,7 +46,7 @@ def SeparateFlagArgs(args: list):
   Returns:
     A tuple with the Fire args (a list), followed by the Flag args (a list).
   """
-  if len(args) > 1 and (args[-1] == '-h' or args[-1] == '--help') and '--' not in args:
+  if len(args) > 0 and (args[-1] == '-h' or args[-1] == '--help') and '--' not in args:
     args.pop()
     args.append('--')
     args.append('-h')
clima/fire/parser.py
ReplaceText(target='0' @(49,17)->(49,18))
def SeparateFlagArgs(args: list): Returns: A tuple with the Fire args (a list), followed by the Flag args (a list). """ if len(args) > 1 and (args[-1] == '-h' or args[-1] == '--help') and '--' not in args: args.pop() args.append('--') args.append('-h')
def SeparateFlagArgs(args: list): Returns: A tuple with the Fire args (a list), followed by the Flag args (a list). """ if len(args) > 0 and (args[-1] == '-h' or args[-1] == '--help') and '--' not in args: args.pop() args.append('--') args.append('-h')
585
https://:@github.com/jeffkinnison/pyrameter.git
306c95b1e0643fc47e0206281cd12c714849782d
@@ -47,7 +47,7 @@ def backend_factory(path, *args, **kwargs):
     if os.path.isfile(path) or os.path.isdir(path):
         from pyrameter.db.local import JsonStorage
         return JsonStorage(path, *args, **kwargs)
-    elif path.find('mongodb://') >= 0:
+    elif path.find('mongodb://') == 0:
         from pyrameter.db.mongo import MongoStorage
         return MongoStorage(path, *args, **kwargs)
     else:
pyrameter/db/backend_factory.py
ReplaceText(target='==' @(50,33)->(50,35))
def backend_factory(path, *args, **kwargs): if os.path.isfile(path) or os.path.isdir(path): from pyrameter.db.local import JsonStorage return JsonStorage(path, *args, **kwargs) elif path.find('mongodb://') >= 0: from pyrameter.db.mongo import MongoStorage return MongoStorage(path, *args, **kwargs) else:
def backend_factory(path, *args, **kwargs): if os.path.isfile(path) or os.path.isdir(path): from pyrameter.db.local import JsonStorage return JsonStorage(path, *args, **kwargs) elif path.find('mongodb://') == 0: from pyrameter.db.mongo import MongoStorage return MongoStorage(path, *args, **kwargs) else:
586
https://:@github.com/RADAR-base/pyRADAR-processing.git
2c6ea694e1d40b9c16d6a10e10c0af9afd5a3d0d
@@ -115,7 +115,7 @@ class Project(RadarObject):
             self.ptcs_update_info(info)
         labels = kwargs.get('labels', False)
-        if info:
+        if labels:
             for ptc in labels:
                 if ptc not in self.participants:
                     self.add_participant(ptc)
radar/wrappers.py
ReplaceText(target='labels' @(118,11)->(118,15))
class Project(RadarObject): self.ptcs_update_info(info) labels = kwargs.get('labels', False) if info: for ptc in labels: if ptc not in self.participants: self.add_participant(ptc)
class Project(RadarObject): self.ptcs_update_info(info) labels = kwargs.get('labels', False) if labels: for ptc in labels: if ptc not in self.participants: self.add_participant(ptc)
587
https://:@github.com/marshallward/ropes.git
b8fd70b68d2ec157c7f2397aab3931e50c5a16ae
@@ -145,7 +145,7 @@ class Rope(object):
                     else:
                         if start is None:
                             offset = index.step + (head.length - 1) % (-index.step)
-                        elif start > 0:
+                        elif start >= 0:
                             offset = index.step + min(start, head.length - 1) % (-index.step)
                         else:
                             offset = index.step + (start + head.length) % (-index.step)
ropes.py
ReplaceText(target='>=' @(148,35)->(148,36))
class Rope(object): else: if start is None: offset = index.step + (head.length - 1) % (-index.step) elif start > 0: offset = index.step + min(start, head.length - 1) % (-index.step) else: offset = index.step + (start + head.length) % (-index.step)
class Rope(object): else: if start is None: offset = index.step + (head.length - 1) % (-index.step) elif start >= 0: offset = index.step + min(start, head.length - 1) % (-index.step) else: offset = index.step + (start + head.length) % (-index.step)
588
https://:@github.com/pkgw/bibtools.git
2273ff1a94f4f6952026c5ab1d3ddffa042f2158
@@ -204,7 +204,7 @@ class Dump (multitool.Command):
     help_if_no_args = False
     def invoke (self, args, app=None, **kwargs):
-        if len (args) != 1:
+        if len (args) != 0:
             raise multitool.UsageError ('expected no arguments')
         app.export_all (sys.stdout, 72)
bibtools/cli.py
ReplaceText(target='0' @(207,25)->(207,26))
class Dump (multitool.Command): help_if_no_args = False def invoke (self, args, app=None, **kwargs): if len (args) != 1: raise multitool.UsageError ('expected no arguments') app.export_all (sys.stdout, 72)
class Dump (multitool.Command): help_if_no_args = False def invoke (self, args, app=None, **kwargs): if len (args) != 0: raise multitool.UsageError ('expected no arguments') app.export_all (sys.stdout, 72)
589
https://:@github.com/cooper-software/hammock.git
ed682f4d33be53d7d2f2b21b384a36a2770c6cf5
@@ -20,7 +20,7 @@ class Hammock(object):
         entities = set()
         self.collections_by_class_name = {}
-        for collection_cls in collections:
+        for collection_cls in collection_classes:
             entities.add(collection_cls.entity)
             collection = collection_cls(storage)
             self.collections_by_class_name[collection_cls.__name__] = collection
hammock/__init__.py
ReplaceText(target='collection_classes' @(23,24)->(23,35))
class Hammock(object): entities = set() self.collections_by_class_name = {} for collection_cls in collections: entities.add(collection_cls.entity) collection = collection_cls(storage) self.collections_by_class_name[collection_cls.__name__] = collection
class Hammock(object): entities = set() self.collections_by_class_name = {} for collection_cls in collection_classes: entities.add(collection_cls.entity) collection = collection_cls(storage) self.collections_by_class_name[collection_cls.__name__] = collection
590
https://:@github.com/bempp/bempp-cl.git
7ab5546ce89b626e146cb29433ed1113a3c72c9c
@@ -97,7 +97,7 @@ def visualize_with_jupyter_notebook(obj, mode="element", transformation=None):
         for element in grid.entity_iterator(0):
             index = element.index
             local_values = np.real(
-                transformation(obj.evaluate(element, local_coordinates))
+                transformation(obj.evaluate(index, local_coordinates))
             )
             values[index] = local_values.flatten()
bempp/api/external/viewers.py
ReplaceText(target='index' @(100,44)->(100,51))
def visualize_with_jupyter_notebook(obj, mode="element", transformation=None): for element in grid.entity_iterator(0): index = element.index local_values = np.real( transformation(obj.evaluate(element, local_coordinates)) ) values[index] = local_values.flatten()
def visualize_with_jupyter_notebook(obj, mode="element", transformation=None): for element in grid.entity_iterator(0): index = element.index local_values = np.real( transformation(obj.evaluate(index, local_coordinates)) ) values[index] = local_values.flatten()
591
https://:@github.com/bempp/bempp-cl.git
094eb06f36e837f3a17d2abe23f4bb78d1a32dc0
@@ -91,7 +91,7 @@ def assemble_dense(
     cols = domain.global_dof_count
     nshape_test = dual_to_range.number_of_shape_functions
-    nshape_trial = dual_to_range.number_of_shape_functions
+    nshape_trial = domain.number_of_shape_functions
     precision = operator_descriptor.precision
bempp/core/numba/dense_assembler.py
ReplaceText(target='domain' @(94,19)->(94,32))
def assemble_dense( cols = domain.global_dof_count nshape_test = dual_to_range.number_of_shape_functions nshape_trial = dual_to_range.number_of_shape_functions precision = operator_descriptor.precision
def assemble_dense( cols = domain.global_dof_count nshape_test = dual_to_range.number_of_shape_functions nshape_trial = domain.number_of_shape_functions precision = operator_descriptor.precision
592
https://:@github.com/bempp/bempp-cl.git
242da1c08304423e6ef6ead60e691928426dab1b
@@ -1035,9 +1035,9 @@ def helmholtz_hypersingular_regular(
                         ] * (
                             curl_product[test_fun_index, trial_fun_index]
                             - wavenumber
-                            * wavenumber
+                            * wavenumber
                             * local_test_fun_values[
-                                0, test_fun_index, quad_point_index
+                                0, test_fun_index, test_point_index
                             ]
                             * local_trial_fun_values[
                                 0, trial_fun_index, quad_point_index
bempp/core/numba/kernels.py
ReplaceText(target='test_point_index' @(1040,55)->(1040,71))
def helmholtz_hypersingular_regular( ] * ( curl_product[test_fun_index, trial_fun_index] - wavenumber * wavenumber * local_test_fun_values[ 0, test_fun_index, quad_point_index ] * local_trial_fun_values[ 0, trial_fun_index, quad_point_index
def helmholtz_hypersingular_regular( ] * ( curl_product[test_fun_index, trial_fun_index] - wavenumber * wavenumber * local_test_fun_values[ 0, test_fun_index, test_point_index ] * local_trial_fun_values[ 0, trial_fun_index, quad_point_index
593
https://:@github.com/bempp/bempp-cl.git
1bb4a1c097e4e3e87ed328f37cde5abd9c0c24b9
@@ -33,7 +33,7 @@ class DensePotentialAssembler(object):
             x_transformed = self.space.map_to_full_grid @ (
                 self.space.dof_transformation @ x
             )
-            result = implementation(x)
+            result = implementation(x_transformed)
             return result.reshape([kernel_dimension, -1], order="F")
         self._evaluator = potential_evaluator
bempp/core/dense_potential_assembler.py
ReplaceText(target='x_transformed' @(36,36)->(36,37))
class DensePotentialAssembler(object): x_transformed = self.space.map_to_full_grid @ ( self.space.dof_transformation @ x ) result = implementation(x) return result.reshape([kernel_dimension, -1], order="F") self._evaluator = potential_evaluator
class DensePotentialAssembler(object): x_transformed = self.space.map_to_full_grid @ ( self.space.dof_transformation @ x ) result = implementation(x_transformed) return result.reshape([kernel_dimension, -1], order="F") self._evaluator = potential_evaluator
594
https://:@bitbucket.org/agaveapi/agaveflask.git
17f24e257b275fe1bf64afcf6a5b09d87ab05b79
@@ -34,7 +34,7 @@ def read_config(conf_file='service.conf'):
         raise RuntimeError('No config file found.')
     if not parser.parser.read(place):
         raise RuntimeError("couldn't read config file from {0}"
-                           .format(', '.join(place)))
+                           .format(', '.join(places)))
     return parser
 Config = read_config()
\ No newline at end of file
agaveflask/config.py
ReplaceText(target='places' @(37,45)->(37,50))
def read_config(conf_file='service.conf'): raise RuntimeError('No config file found.') if not parser.parser.read(place): raise RuntimeError("couldn't read config file from {0}" .format(', '.join(place))) return parser Config = read_config() \ No newline at end of file
def read_config(conf_file='service.conf'): raise RuntimeError('No config file found.') if not parser.parser.read(place): raise RuntimeError("couldn't read config file from {0}" .format(', '.join(places))) return parser Config = read_config() \ No newline at end of file
595
https://:@github.com/llazzaro/jarvispatrick.git
05457d04a6982a47c39d5109b6e91efa6441bd93
@@ -27,7 +27,7 @@ class JarvisPatrick(object):
     def __call__(self, number_of_neighbors, number_of_common_neighbors):
         """
         """
-        if number_of_common_neighbors >= number_of_neighbors:
+        if number_of_common_neighbors > number_of_neighbors:
             raise ValueError('Asked for more common neighbors than number of neighbors')
         neighbors_list = {}
         for element in self.dataset_elements:
jarvispatrick/__init__.py
ReplaceText(target='>' @(30,38)->(30,40))
class JarvisPatrick(object): def __call__(self, number_of_neighbors, number_of_common_neighbors): """ """ if number_of_common_neighbors >= number_of_neighbors: raise ValueError('Asked for more common neighbors than number of neighbors') neighbors_list = {} for element in self.dataset_elements:
class JarvisPatrick(object): def __call__(self, number_of_neighbors, number_of_common_neighbors): """ """ if number_of_common_neighbors > number_of_neighbors: raise ValueError('Asked for more common neighbors than number of neighbors') neighbors_list = {} for element in self.dataset_elements:
596
https://:@github.com/kennydo/oauthlib.git
1f292e6923aa9419d28e3700e22102dffd447886
@@ -112,7 +112,7 @@ def collect_parameters(uri_query='', body='', headers=None,
         if isinstance(k, str):
             k = k.decode('utf-8')
         if isinstance(v, str):
-            if v.startswith('oauth_'):
+            if k.startswith('oauth_'):
                 v = utils.unescape(v)
             else:
                 v = v.decode('utf-8')
oauthlib/oauth1/rfc5849/signature.py
ReplaceText(target='k' @(115,15)->(115,16))
def collect_parameters(uri_query='', body='', headers=None, if isinstance(k, str): k = k.decode('utf-8') if isinstance(v, str): if v.startswith('oauth_'): v = utils.unescape(v) else: v = v.decode('utf-8')
def collect_parameters(uri_query='', body='', headers=None, if isinstance(k, str): k = k.decode('utf-8') if isinstance(v, str): if k.startswith('oauth_'): v = utils.unescape(v) else: v = v.decode('utf-8')
597
https://:@github.com/kennydo/oauthlib.git
8f57459fbbab1f317d5475f31f90b1a75016a2a8
@@ -281,7 +281,7 @@ def add_params_to_uri(uri, params, fragment=False):
     """Add a list of two-tuples to the uri query components."""
     sch, net, path, par, query, fra = urlparse.urlparse(uri)
     if fragment:
-        fra = add_params_to_qs(query, params)
+        fra = add_params_to_qs(fra, params)
     else:
         query = add_params_to_qs(query, params)
     return urlparse.urlunparse((sch, net, path, par, query, fra))
oauthlib/common.py
ReplaceText(target='fra' @(284,31)->(284,36))
def add_params_to_uri(uri, params, fragment=False): """Add a list of two-tuples to the uri query components.""" sch, net, path, par, query, fra = urlparse.urlparse(uri) if fragment: fra = add_params_to_qs(query, params) else: query = add_params_to_qs(query, params) return urlparse.urlunparse((sch, net, path, par, query, fra))
def add_params_to_uri(uri, params, fragment=False): """Add a list of two-tuples to the uri query components.""" sch, net, path, par, query, fra = urlparse.urlparse(uri) if fragment: fra = add_params_to_qs(fra, params) else: query = add_params_to_qs(query, params) return urlparse.urlunparse((sch, net, path, par, query, fra))
598
https://:@github.com/jakobrunge/tigramite.git
2011867b0f0cc4c3ca6bb3317b2a3a37d828116b
@@ -236,7 +236,7 @@ def time_bin_with_mask(data, time_bin_length, sample_selector=None):
     sample_selector.shape = (T, 1)
     bindata = numpy.zeros(
-        (T / time_bin_length,) + data.shape[1:], dtype="float32")
+        (T // time_bin_length,) + data.shape[1:], dtype="float32")
     for index, i in enumerate(range(0, T - time_bin_length + 1, time_bin_length)):
         # print weighted_avg_and_std(fulldata[i:i+time_bin_length], axis=0,
tigramite/data_processing.py
ReplaceText(target='//' @(239,11)->(239,12))
def time_bin_with_mask(data, time_bin_length, sample_selector=None): sample_selector.shape = (T, 1) bindata = numpy.zeros( (T / time_bin_length,) + data.shape[1:], dtype="float32") for index, i in enumerate(range(0, T - time_bin_length + 1, time_bin_length)): # print weighted_avg_and_std(fulldata[i:i+time_bin_length], axis=0,
def time_bin_with_mask(data, time_bin_length, sample_selector=None): sample_selector.shape = (T, 1) bindata = numpy.zeros( (T // time_bin_length,) + data.shape[1:], dtype="float32") for index, i in enumerate(range(0, T - time_bin_length + 1, time_bin_length)): # print weighted_avg_and_std(fulldata[i:i+time_bin_length], axis=0,
599
https://:@github.com/jakobrunge/tigramite.git
2011867b0f0cc4c3ca6bb3317b2a3a37d828116b
@@ -528,7 +528,7 @@ class LinearMediation(Models):
     def tsg_to_net(self, node, max_lag):
         """Helper function to translate from time series graph to network."""
-        row = node / max_lag
+        row = node // max_lag
         lag = node % max_lag
         return (row, -lag)
tigramite/models.py
ReplaceText(target='//' @(531,19)->(531,20))
class LinearMediation(Models): def tsg_to_net(self, node, max_lag): """Helper function to translate from time series graph to network.""" row = node / max_lag lag = node % max_lag return (row, -lag)
class LinearMediation(Models): def tsg_to_net(self, node, max_lag): """Helper function to translate from time series graph to network.""" row = node // max_lag lag = node % max_lag return (row, -lag)