Dataset Preview
Columns: method (string), block (string), complex_masked_block (string), complex_input (string), complex_target (string), medium_masked_block (string), medium_input (string), medium_target (string), simple_masked_block (string), simple_input (string), simple_target (string)
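Each row pairs a complete Python method (method) with one contiguous block extracted from it (block). Indentation is flattened for display: <TAB> marks one indentation level, and each original newline appears as a single space before that line's run of <TAB> markers. For each difficulty level (complex, medium, simple), *_masked_block is the block with a span replaced by a <MASK> token, *_input is the whole method with the same mask applied, and *_target is the text the mask removed. In the rows shown, the masked span always runs from a cut point to the end of the block, with complex cutting earliest (the longest target) and simple cutting latest (the shortest); medium and simple can coincide on short blocks. A sketch for decoding and re-stitching these fields appears after the preview rows.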
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1 <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e) "
"<TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1 <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e)"
"<TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <=<MASK>"
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <=<MASK> "
"int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1 <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e)"
"<TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i<MASK>"
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i<MASK> "
"+= 1 <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e)"
"<TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1 <TAB><TAB>except Exception as<MASK>"
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1 <TAB><TAB>except Exception as<MASK> "
"e: <TAB><TAB><TAB>logging.error(e)"
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1 <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e) "
"<TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1"
"<TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <MASK>"
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <MASK> <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e) "
"<TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1"
"<TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <MASK>"
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <MASK> <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e) "
"<TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1"
"<TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <MASK>"
"def __init__(self, range_str): <TAB>self.range_str = to_str(range_str) <TAB>self.range = set() <TAB>range_str = to_str(range_str).split(",") <TAB>for item in range_str: <TAB><TAB>try: <TAB><TAB><TAB>int_range = item.split("-") <TAB><TAB><TAB>if len(int_range) == 1: <TAB><TAB><TAB><TAB>if item: <TAB><TAB><TAB><TAB><TAB>self.range.add(int(item)) <TAB><TAB><TAB>elif len(int_range) == 2: <TAB><TAB><TAB><TAB>int_range[0] = int(int_range[0]) <TAB><TAB><TAB><TAB>int_range[1] = int(int_range[1]) <TAB><TAB><TAB><TAB>if int_range[0] < 0: <TAB><TAB><TAB><TAB><TAB>int_range[0] = 0 <TAB><TAB><TAB><TAB>if int_range[1] > 65535: <TAB><TAB><TAB><TAB><TAB>int_range[1] = 65535 <TAB><TAB><TAB><TAB>i = int_range[0] <TAB><TAB><TAB><TAB>while i <= int_range[1]: <MASK> <TAB><TAB>except Exception as e: <TAB><TAB><TAB>logging.error(e) "
"<TAB><TAB><TAB><TAB><TAB>self.range.add(i) <TAB><TAB><TAB><TAB><TAB>i += 1"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>)"
"<TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <MASK> <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>)"
"<TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <MASK> <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>)"
"<TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <MASK> <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>is_strict=True, <TAB><TAB>)"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>)"
"<TAB><TAB>train_input_fn = input_reader.InputFn( <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <MASK> <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>)"
"<TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <MASK> <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>)"
"<TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <MASK> <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>)"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>)"
"<TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <MASK> <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>)"
"<TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <MASK> <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>)"
"<TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <MASK> <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>)"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>) <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>)"
"<TAB><TAB>callbacks.append( <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <MASK> <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>)"
"<TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <MASK> <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>)"
"<TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <MASK>"
"def run(callbacks=None): <TAB>keras_utils.set_session_config(enable_xla=FLAGS.enable_xla) <TAB>params = config_factory.config_generator(FLAGS.model) <TAB>params = params_dict.override_params_dict(params, FLAGS.config_file, is_strict=True) <TAB>params = params_dict.override_params_dict( <TAB><TAB>params, FLAGS.params_override, is_strict=True <TAB>) <TAB>params.override( <TAB><TAB>{ <TAB><TAB><TAB>"strategy_type": FLAGS.strategy_type, <TAB><TAB><TAB>"model_dir": FLAGS.model_dir, <TAB><TAB><TAB>"strategy_config": executor.strategy_flags_dict(), <TAB><TAB>}, <TAB><TAB>is_strict=False, <TAB>) <TAB># Make sure use_tpu and strategy_type are in sync. <TAB>params.use_tpu = params.strategy_type == "tpu" <TAB>if not params.use_tpu: <TAB><TAB>params.override( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"architecture": { <TAB><TAB><TAB><TAB><TAB>"use_bfloat16": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB><TAB>"norm_activation": { <TAB><TAB><TAB><TAB><TAB>"use_sync_bn": False, <TAB><TAB><TAB><TAB>}, <TAB><TAB><TAB>}, <TAB><TAB><TAB>is_strict=True, <TAB><TAB>) <TAB>params.validate() <TAB>params.lock() <TAB>pp = pprint.PrettyPrinter() <TAB>params_str = pp.pformat(params.as_dict()) <TAB>logging.info("Model Parameters: %s", params_str) <TAB>train_input_fn = None <TAB>eval_input_fn = None <TAB>training_file_pattern = ( <TAB><TAB>FLAGS.training_file_pattern or params.train.train_file_pattern <TAB>) <TAB>eval_file_pattern = FLAGS.eval_file_pattern or params.eval.eval_file_pattern <TAB>if not training_file_pattern and not eval_file_pattern: <TAB><TAB>raise ValueError( <TAB><TAB><TAB>"Must provide at least one of training_file_pattern and " <TAB><TAB><TAB>"eval_file_pattern." <TAB><TAB>) <TAB>if training_file_pattern: <TAB><TAB># Use global batch size for single host. <TAB><TAB>train_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=training_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.TRAIN, <TAB><TAB><TAB>batch_size=params.train.batch_size, <TAB><TAB>) <TAB>if eval_file_pattern: <TAB><TAB>eval_input_fn = input_reader.InputFn( <TAB><TAB><TAB>file_pattern=eval_file_pattern, <TAB><TAB><TAB>params=params, <TAB><TAB><TAB>mode=input_reader.ModeKeys.PREDICT_WITH_GT, <TAB><TAB><TAB>batch_size=params.eval.batch_size, <TAB><TAB><TAB>num_examples=params.eval.eval_samples, <TAB><TAB>) <TAB>if callbacks is None: <TAB><TAB>callbacks = [] <TAB>if FLAGS.log_steps: <TAB><TAB>callbacks.append( <TAB><TAB><TAB>keras_utils.TimeHistory( <TAB><TAB><TAB><TAB>batch_size=params.train.batch_size, <MASK> <TAB>return run_executor( <TAB><TAB>params, <TAB><TAB>FLAGS.mode, <TAB><TAB>checkpoint_path=FLAGS.checkpoint_path, <TAB><TAB>train_input_fn=train_input_fn, <TAB><TAB>eval_input_fn=eval_input_fn, <TAB><TAB>callbacks=callbacks, <TAB>) "
"<TAB><TAB><TAB><TAB>log_steps=FLAGS.log_steps, <TAB><TAB><TAB>) <TAB><TAB>)"
"def check_surrounding_whitespaces(self, definition, docstring): <TAB>"""D210: No whitespaces allowed surrounding docstring text.""" <TAB>if docstring: <TAB><TAB>lines = ast.literal_eval(docstring).split("\n") <TAB><TAB>if lines[0].startswith(" ") or len(lines) == 1 and lines[0].endswith(" "): <TAB><TAB><TAB>return violations.D210() "
"<TAB><TAB>lines = ast.literal_eval(docstring).split("\n") <TAB><TAB>if lines[0].startswith(" ") or len(lines) == 1 and lines[0].endswith(" "): <TAB><TAB><TAB>return violations.D210()"
"<TAB><TAB>lines =<MASK>"
"def check_surrounding_whitespaces(self, definition, docstring): <TAB>"""D210: No whitespaces allowed surrounding docstring text.""" <TAB>if docstring: <TAB><TAB>lines =<MASK> "
"ast.literal_eval(docstring).split("\n") <TAB><TAB>if lines[0].startswith(" ") or len(lines) == 1 and lines[0].endswith(" "): <TAB><TAB><TAB>return violations.D210()"
"<TAB><TAB>lines = ast.literal_eval(docstring).split("\n") <TAB><TAB>if lines[0].startswith(" ") or len(lines) == 1 and<MASK>"
"def check_surrounding_whitespaces(self, definition, docstring): <TAB>"""D210: No whitespaces allowed surrounding docstring text.""" <TAB>if docstring: <TAB><TAB>lines = ast.literal_eval(docstring).split("\n") <TAB><TAB>if lines[0].startswith(" ") or len(lines) == 1 and<MASK> "
"lines[0].endswith(" "): <TAB><TAB><TAB>return violations.D210()"
"<TAB><TAB>lines = ast.literal_eval(docstring).split("\n") <TAB><TAB>if lines[0].startswith(" ") or len(lines) == 1 and lines[0].endswith(" "): <MASK>"
"def check_surrounding_whitespaces(self, definition, docstring): <TAB>"""D210: No whitespaces allowed surrounding docstring text.""" <TAB>if docstring: <TAB><TAB>lines = ast.literal_eval(docstring).split("\n") <TAB><TAB>if lines[0].startswith(" ") or len(lines) == 1 and lines[0].endswith(" "): <MASK> "
"<TAB><TAB><TAB>return violations.D210()"
"def import_names(): <TAB>plat_table = ( <TAB><TAB>("windows", ("windows")), <TAB><TAB>("darwin", ("darwin", "ios")), <TAB><TAB>("linux", ("linux*",)), <TAB><TAB>("freebsd", ("freebsd*", "openbsd*")), <TAB><TAB>("poky", ("poky",)), <TAB>) <TAB>arch_table = ( <TAB><TAB>("x86", ("i386", "i486", "i586", "i686")), <TAB><TAB>("x86_64", ("x64", "x86_64", "amd64", "intel")), <TAB><TAB>("arm", ("armv5",)), <TAB><TAB>("armv6", ("armv6l",)), <TAB><TAB>("armv7", ("armv7l",)), <TAB><TAB>("ppc64", ("ppc64le",)), <TAB><TAB>("mips32", ("mips",)), <TAB><TAB>("aarch32", ("aarch32",)), <TAB><TAB>("aarch64", ("aarch64", "arm64")), <TAB>) <TAB>plat = platform.system().lower() <TAB>mach = platform.machine().lower() <TAB>for alias, platlist in plat_table: <TAB><TAB>for s in platlist: <TAB><TAB><TAB>if s.startswith(plat): <TAB><TAB><TAB><TAB>plat = alias <TAB><TAB><TAB><TAB>break <TAB>if plat == "linux": <TAB><TAB>cname, cver = platform.libc_ver() <TAB><TAB>if cname == "musl": <TAB><TAB><TAB>plat = "musl" <TAB><TAB>elif cname == "libc": <TAB><TAB><TAB>plat = "android" <TAB>for alias, archlist in arch_table: <TAB><TAB>if mach in archlist: <TAB><TAB><TAB>mach = alias <TAB><TAB><TAB>break <TAB>if plat == "windows" and mach == "x86_64": <TAB><TAB>bitness = struct.calcsize("P".encode()) * 8 <TAB><TAB>if bitness == 32: <TAB><TAB><TAB>mach = "x86" <TAB>name = ".".join([__name__, "%s_%s" % (plat, mach), "pytransform"]) <TAB>m = __import__(name, globals(), locals(), ["*"]) <TAB>sys.modules[__name__].__dict__.update(m.__dict__) "
"<TAB><TAB>cname, cver = platform.libc_ver() <TAB><TAB>if cname == "musl": <TAB><TAB><TAB>plat = "musl" <TAB><TAB>elif cname == "libc": <TAB><TAB><TAB>plat = "android""
"<TAB><TAB>cname, cver =<MASK>"
"def import_names(): <TAB>plat_table = ( <TAB><TAB>("windows", ("windows")), <TAB><TAB>("darwin", ("darwin", "ios")), <TAB><TAB>("linux", ("linux*",)), <TAB><TAB>("freebsd", ("freebsd*", "openbsd*")), <TAB><TAB>("poky", ("poky",)), <TAB>) <TAB>arch_table = ( <TAB><TAB>("x86", ("i386", "i486", "i586", "i686")), <TAB><TAB>("x86_64", ("x64", "x86_64", "amd64", "intel")), <TAB><TAB>("arm", ("armv5",)), <TAB><TAB>("armv6", ("armv6l",)), <TAB><TAB>("armv7", ("armv7l",)), <TAB><TAB>("ppc64", ("ppc64le",)), <TAB><TAB>("mips32", ("mips",)), <TAB><TAB>("aarch32", ("aarch32",)), <TAB><TAB>("aarch64", ("aarch64", "arm64")), <TAB>) <TAB>plat = platform.system().lower() <TAB>mach = platform.machine().lower() <TAB>for alias, platlist in plat_table: <TAB><TAB>for s in platlist: <TAB><TAB><TAB>if s.startswith(plat): <TAB><TAB><TAB><TAB>plat = alias <TAB><TAB><TAB><TAB>break <TAB>if plat == "linux": <TAB><TAB>cname, cver =<MASK> <TAB>for alias, archlist in arch_table: <TAB><TAB>if mach in archlist: <TAB><TAB><TAB>mach = alias <TAB><TAB><TAB>break <TAB>if plat == "windows" and mach == "x86_64": <TAB><TAB>bitness = struct.calcsize("P".encode()) * 8 <TAB><TAB>if bitness == 32: <TAB><TAB><TAB>mach = "x86" <TAB>name = ".".join([__name__, "%s_%s" % (plat, mach), "pytransform"]) <TAB>m = __import__(name, globals(), locals(), ["*"]) <TAB>sys.modules[__name__].__dict__.update(m.__dict__) "
"platform.libc_ver() <TAB><TAB>if cname == "musl": <TAB><TAB><TAB>plat = "musl" <TAB><TAB>elif cname == "libc": <TAB><TAB><TAB>plat = "android""
"<TAB><TAB>cname, cver = platform.libc_ver() <TAB><TAB>if cname == "musl": <TAB><TAB><TAB>plat =<MASK>"
"def import_names(): <TAB>plat_table = ( <TAB><TAB>("windows", ("windows")), <TAB><TAB>("darwin", ("darwin", "ios")), <TAB><TAB>("linux", ("linux*",)), <TAB><TAB>("freebsd", ("freebsd*", "openbsd*")), <TAB><TAB>("poky", ("poky",)), <TAB>) <TAB>arch_table = ( <TAB><TAB>("x86", ("i386", "i486", "i586", "i686")), <TAB><TAB>("x86_64", ("x64", "x86_64", "amd64", "intel")), <TAB><TAB>("arm", ("armv5",)), <TAB><TAB>("armv6", ("armv6l",)), <TAB><TAB>("armv7", ("armv7l",)), <TAB><TAB>("ppc64", ("ppc64le",)), <TAB><TAB>("mips32", ("mips",)), <TAB><TAB>("aarch32", ("aarch32",)), <TAB><TAB>("aarch64", ("aarch64", "arm64")), <TAB>) <TAB>plat = platform.system().lower() <TAB>mach = platform.machine().lower() <TAB>for alias, platlist in plat_table: <TAB><TAB>for s in platlist: <TAB><TAB><TAB>if s.startswith(plat): <TAB><TAB><TAB><TAB>plat = alias <TAB><TAB><TAB><TAB>break <TAB>if plat == "linux": <TAB><TAB>cname, cver = platform.libc_ver() <TAB><TAB>if cname == "musl": <TAB><TAB><TAB>plat =<MASK> <TAB>for alias, archlist in arch_table: <TAB><TAB>if mach in archlist: <TAB><TAB><TAB>mach = alias <TAB><TAB><TAB>break <TAB>if plat == "windows" and mach == "x86_64": <TAB><TAB>bitness = struct.calcsize("P".encode()) * 8 <TAB><TAB>if bitness == 32: <TAB><TAB><TAB>mach = "x86" <TAB>name = ".".join([__name__, "%s_%s" % (plat, mach), "pytransform"]) <TAB>m = __import__(name, globals(), locals(), ["*"]) <TAB>sys.modules[__name__].__dict__.update(m.__dict__) "
""musl" <TAB><TAB>elif cname == "libc": <TAB><TAB><TAB>plat = "android""
"<TAB><TAB>cname, cver = platform.libc_ver() <TAB><TAB>if cname == "musl": <TAB><TAB><TAB>plat = "musl" <TAB><TAB>elif cname ==<MASK>"
"def import_names(): <TAB>plat_table = ( <TAB><TAB>("windows", ("windows")), <TAB><TAB>("darwin", ("darwin", "ios")), <TAB><TAB>("linux", ("linux*",)), <TAB><TAB>("freebsd", ("freebsd*", "openbsd*")), <TAB><TAB>("poky", ("poky",)), <TAB>) <TAB>arch_table = ( <TAB><TAB>("x86", ("i386", "i486", "i586", "i686")), <TAB><TAB>("x86_64", ("x64", "x86_64", "amd64", "intel")), <TAB><TAB>("arm", ("armv5",)), <TAB><TAB>("armv6", ("armv6l",)), <TAB><TAB>("armv7", ("armv7l",)), <TAB><TAB>("ppc64", ("ppc64le",)), <TAB><TAB>("mips32", ("mips",)), <TAB><TAB>("aarch32", ("aarch32",)), <TAB><TAB>("aarch64", ("aarch64", "arm64")), <TAB>) <TAB>plat = platform.system().lower() <TAB>mach = platform.machine().lower() <TAB>for alias, platlist in plat_table: <TAB><TAB>for s in platlist: <TAB><TAB><TAB>if s.startswith(plat): <TAB><TAB><TAB><TAB>plat = alias <TAB><TAB><TAB><TAB>break <TAB>if plat == "linux": <TAB><TAB>cname, cver = platform.libc_ver() <TAB><TAB>if cname == "musl": <TAB><TAB><TAB>plat = "musl" <TAB><TAB>elif cname ==<MASK> <TAB>for alias, archlist in arch_table: <TAB><TAB>if mach in archlist: <TAB><TAB><TAB>mach = alias <TAB><TAB><TAB>break <TAB>if plat == "windows" and mach == "x86_64": <TAB><TAB>bitness = struct.calcsize("P".encode()) * 8 <TAB><TAB>if bitness == 32: <TAB><TAB><TAB>mach = "x86" <TAB>name = ".".join([__name__, "%s_%s" % (plat, mach), "pytransform"]) <TAB>m = __import__(name, globals(), locals(), ["*"]) <TAB>sys.modules[__name__].__dict__.update(m.__dict__) "
""libc": <TAB><TAB><TAB>plat = "android""
"def _get_troubleshooting_result_initial( <TAB>self, <TAB>resource_group_name: str, <TAB>network_watcher_name: str, <TAB>parameters: "_models.QueryTroubleshootingParameters", <TAB>**kwargs ) -> "_models.TroubleshootingResult": <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.TroubleshootingResult"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2019-09-01" <TAB>content_type = kwargs.pop("content_type", "application/json") <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self._get_troubleshooting_result_initial.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"networkWatcherName": self._serialize.url( <TAB><TAB><TAB>"network_watcher_name", network_watcher_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Content-Type"] = self._serialize.header( <TAB><TAB>"content_type", content_type, "str" <TAB>) <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>body_content_kwargs = {} # type: Dict[str, Any] <TAB>body_content = self._serialize.body(parameters, "QueryTroubleshootingParameters") <TAB>body_content_kwargs["content"] = body_content <TAB>request = self._client.post( <TAB><TAB>url, query_parameters, header_parameters, **body_content_kwargs <TAB>) <TAB>pipeline_response = await self._client._pipeline.run( <TAB><TAB>request, stream=False, **kwargs <TAB>) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200, 202]: <TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>error = self._deserialize(_models.ErrorResponse, response) <TAB><TAB>raise HttpResponseError( <TAB><TAB><TAB>response=response, model=error, error_format=ARMErrorFormat <TAB><TAB>) <TAB>if response.status_code == 200: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if response.status_code == 202: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"<TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>error = self._deserialize(_models.ErrorResponse, response) <TAB><TAB>raise HttpResponseError( <TAB><TAB><TAB>response=response, model=error, error_format=ARMErrorFormat <TAB><TAB>)"
"<TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <MASK>"
"def _get_troubleshooting_result_initial( <TAB>self, <TAB>resource_group_name: str, <TAB>network_watcher_name: str, <TAB>parameters: "_models.QueryTroubleshootingParameters", <TAB>**kwargs ) -> "_models.TroubleshootingResult": <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.TroubleshootingResult"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2019-09-01" <TAB>content_type = kwargs.pop("content_type", "application/json") <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self._get_troubleshooting_result_initial.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"networkWatcherName": self._serialize.url( <TAB><TAB><TAB>"network_watcher_name", network_watcher_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Content-Type"] = self._serialize.header( <TAB><TAB>"content_type", content_type, "str" <TAB>) <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>body_content_kwargs = {} # type: Dict[str, Any] <TAB>body_content = self._serialize.body(parameters, "QueryTroubleshootingParameters") <TAB>body_content_kwargs["content"] = body_content <TAB>request = self._client.post( <TAB><TAB>url, query_parameters, header_parameters, **body_content_kwargs <TAB>) <TAB>pipeline_response = await self._client._pipeline.run( <TAB><TAB>request, stream=False, **kwargs <TAB>) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200, 202]: <TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <MASK> <TAB>if response.status_code == 200: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if response.status_code == 202: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"<TAB><TAB>error = self._deserialize(_models.ErrorResponse, response) <TAB><TAB>raise HttpResponseError( <TAB><TAB><TAB>response=response, model=error, error_format=ARMErrorFormat <TAB><TAB>)"
"<TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>error = self._deserialize(_models.ErrorResponse, response) <TAB><TAB>raise HttpResponseError( <MASK>"
"def _get_troubleshooting_result_initial( <TAB>self, <TAB>resource_group_name: str, <TAB>network_watcher_name: str, <TAB>parameters: "_models.QueryTroubleshootingParameters", <TAB>**kwargs ) -> "_models.TroubleshootingResult": <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.TroubleshootingResult"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2019-09-01" <TAB>content_type = kwargs.pop("content_type", "application/json") <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self._get_troubleshooting_result_initial.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"networkWatcherName": self._serialize.url( <TAB><TAB><TAB>"network_watcher_name", network_watcher_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Content-Type"] = self._serialize.header( <TAB><TAB>"content_type", content_type, "str" <TAB>) <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>body_content_kwargs = {} # type: Dict[str, Any] <TAB>body_content = self._serialize.body(parameters, "QueryTroubleshootingParameters") <TAB>body_content_kwargs["content"] = body_content <TAB>request = self._client.post( <TAB><TAB>url, query_parameters, header_parameters, **body_content_kwargs <TAB>) <TAB>pipeline_response = await self._client._pipeline.run( <TAB><TAB>request, stream=False, **kwargs <TAB>) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200, 202]: <TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>error = self._deserialize(_models.ErrorResponse, response) <TAB><TAB>raise HttpResponseError( <MASK> <TAB>if response.status_code == 200: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if response.status_code == 202: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"<TAB><TAB><TAB>response=response, model=error, error_format=ARMErrorFormat <TAB><TAB>)"
"<TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>error = self._deserialize(_models.ErrorResponse, response) <TAB><TAB>raise HttpResponseError( <TAB><TAB><TAB>response=response, model=error,<MASK>"
"def _get_troubleshooting_result_initial( <TAB>self, <TAB>resource_group_name: str, <TAB>network_watcher_name: str, <TAB>parameters: "_models.QueryTroubleshootingParameters", <TAB>**kwargs ) -> "_models.TroubleshootingResult": <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.TroubleshootingResult"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2019-09-01" <TAB>content_type = kwargs.pop("content_type", "application/json") <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self._get_troubleshooting_result_initial.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"networkWatcherName": self._serialize.url( <TAB><TAB><TAB>"network_watcher_name", network_watcher_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Content-Type"] = self._serialize.header( <TAB><TAB>"content_type", content_type, "str" <TAB>) <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>body_content_kwargs = {} # type: Dict[str, Any] <TAB>body_content = self._serialize.body(parameters, "QueryTroubleshootingParameters") <TAB>body_content_kwargs["content"] = body_content <TAB>request = self._client.post( <TAB><TAB>url, query_parameters, header_parameters, **body_content_kwargs <TAB>) <TAB>pipeline_response = await self._client._pipeline.run( <TAB><TAB>request, stream=False, **kwargs <TAB>) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200, 202]: <TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>error = self._deserialize(_models.ErrorResponse, response) <TAB><TAB>raise HttpResponseError( <TAB><TAB><TAB>response=response, model=error,<MASK> <TAB>if response.status_code == 200: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if response.status_code == 202: <TAB><TAB>deserialized = self._deserialize("TroubleshootingResult", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"error_format=ARMErrorFormat <TAB><TAB>)"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password") <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await sftp2.put(temp_dir + "/" + file_name, dest_path) <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"<TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password")"
"<TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host =<MASK>"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host =<MASK> <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await sftp2.put(temp_dir + "/" + file_name, dest_path) <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password")"
"<TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username =<MASK>"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username =<MASK> <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await sftp2.put(temp_dir + "/" + file_name, dest_path) <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password")"
"<TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <MASK>"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <MASK> <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await sftp2.put(temp_dir + "/" + file_name, dest_path) <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"<TAB><TAB>dest_password = input.get("dest_password")"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password") <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await sftp2.put(temp_dir + "/" + file_name, dest_path) <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"<TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await sftp2.put(temp_dir + "/" + file_name, dest_path)"
"<TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with<MASK>"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password") <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with<MASK> <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await sftp2.put(temp_dir + "/" + file_name, dest_path)"
"<TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await<MASK>"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password") <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await<MASK> <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"sftp2.put(temp_dir + "/" + file_name, dest_path)"
"<TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await<MASK>"
"def sftp_copy_from_json(self, input): <TAB>try: <TAB><TAB>src_path = input.get("src_path") <TAB><TAB>dest_path = input.get("dest_path") <TAB><TAB>src_host = input.get("src_host") <TAB><TAB>src_port = input.get("src_port") <TAB><TAB>src_username = input.get("src_username") <TAB><TAB>src_password = input.get("src_password") <TAB><TAB>dest_host = input.get("dest_host") <TAB><TAB>dest_port = input.get("dest_port") <TAB><TAB>dest_username = input.get("dest_username") <TAB><TAB>dest_password = input.get("dest_password") <TAB>except: <TAB><TAB>return "Couldn't get all objects" <TAB>curr_dir = os.getcwd() <TAB>temp_dir = os.path.join(curr_dir, r"temp_data") <TAB>os.makedirs(temp_dir) <TAB>async with asyncssh.connect( <TAB><TAB>host=src_host, <TAB><TAB>port=src_port, <TAB><TAB>username=src_username, <TAB><TAB>password=src_password, <TAB><TAB>known_hosts=None, <TAB>) as conn: <TAB><TAB>async with asyncssh.connect( <TAB><TAB><TAB>host=dest_host, <TAB><TAB><TAB>port=dest_port, <TAB><TAB><TAB>username=dest_username, <TAB><TAB><TAB>password=dest_password, <TAB><TAB><TAB>tunnel=conn, <TAB><TAB><TAB>known_hosts=None, <TAB><TAB>) as tunneled_conn: <TAB><TAB><TAB># grab remote file, place in container <TAB><TAB><TAB>async with conn.start_sftp_client() as sftp: <TAB><TAB><TAB><TAB>results = await sftp.get(src_path, temp_dir) <TAB><TAB><TAB>spliced_path = src_path.split("/") <TAB><TAB><TAB>file_name = spliced_path[len(spliced_path) - 1] <TAB><TAB><TAB># copy grabbed file to desired location <TAB><TAB><TAB>async with tunneled_conn.start_sftp_client() as sftp2: <TAB><TAB><TAB><TAB>results2 = await<MASK> <TAB># cleaning up temp file <TAB>for file in os.listdir(temp_dir): <TAB><TAB>file_path = os.path.join(temp_dir, file) <TAB><TAB>if os.path.isfile(file_path): <TAB><TAB><TAB>os.remove(file_path) <TAB>os.rmdir(temp_dir) <TAB>return "Successfully Copied File." "
"sftp2.put(temp_dir + "/" + file_name, dest_path)"
"def FindShortestPathToNode(self, start, finish, blocked): <TAB>"""Find a shortest path from start to finish, going backwards. <TAB>Args: <TAB> start: The node to start at. If this node appears in blocked, we can't <TAB><TAB>reach finish (unless start==finish). <TAB> finish: The node we're trying to reach. This node is always considered <TAB><TAB>reachable, even if it appears in blocked. <TAB> blocked: A set of nodes we're not allowed to traverse. <TAB>Returns: <TAB> An iterable over nodes, representing the shortest path (as <TAB> [start, ..., finish]), or None if no path exists. <TAB>""" <TAB>queue = collections.deque([start]) <TAB>previous = {start: None} <TAB>seen = set() <TAB>while queue: <TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <TAB><TAB>seen.add(node) <TAB><TAB>for n in node.incoming: <TAB><TAB><TAB>if n not in previous: <TAB><TAB><TAB><TAB>previous[n] = node <TAB><TAB>queue.extend(node.incoming) <TAB>else: <TAB><TAB>return None <TAB>node = finish <TAB>path = collections.deque() <TAB>while node: <TAB><TAB>path.appendleft(node) <TAB><TAB>node = previous[node] <TAB>return path "
"<TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <TAB><TAB>seen.add(node) <TAB><TAB>for n in node.incoming: <TAB><TAB><TAB>if n not in previous: <TAB><TAB><TAB><TAB>previous[n] = node <TAB><TAB>queue.extend(node.incoming)"
"<TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <MASK>"
"def FindShortestPathToNode(self, start, finish, blocked): <TAB>"""Find a shortest path from start to finish, going backwards. <TAB>Args: <TAB> start: The node to start at. If this node appears in blocked, we can't <TAB><TAB>reach finish (unless start==finish). <TAB> finish: The node we're trying to reach. This node is always considered <TAB><TAB>reachable, even if it appears in blocked. <TAB> blocked: A set of nodes we're not allowed to traverse. <TAB>Returns: <TAB> An iterable over nodes, representing the shortest path (as <TAB> [start, ..., finish]), or None if no path exists. <TAB>""" <TAB>queue = collections.deque([start]) <TAB>previous = {start: None} <TAB>seen = set() <TAB>while queue: <TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <MASK> <TAB>else: <TAB><TAB>return None <TAB>node = finish <TAB>path = collections.deque() <TAB>while node: <TAB><TAB>path.appendleft(node) <TAB><TAB>node = previous[node] <TAB>return path "
"<TAB><TAB>seen.add(node) <TAB><TAB>for n in node.incoming: <TAB><TAB><TAB>if n not in previous: <TAB><TAB><TAB><TAB>previous[n] = node <TAB><TAB>queue.extend(node.incoming)"
"<TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <TAB><TAB>seen.add(node) <TAB><TAB>for n in node.incoming: <TAB><TAB><TAB>if n not in previous: <MASK>"
"def FindShortestPathToNode(self, start, finish, blocked): <TAB>"""Find a shortest path from start to finish, going backwards. <TAB>Args: <TAB> start: The node to start at. If this node appears in blocked, we can't <TAB><TAB>reach finish (unless start==finish). <TAB> finish: The node we're trying to reach. This node is always considered <TAB><TAB>reachable, even if it appears in blocked. <TAB> blocked: A set of nodes we're not allowed to traverse. <TAB>Returns: <TAB> An iterable over nodes, representing the shortest path (as <TAB> [start, ..., finish]), or None if no path exists. <TAB>""" <TAB>queue = collections.deque([start]) <TAB>previous = {start: None} <TAB>seen = set() <TAB>while queue: <TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <TAB><TAB>seen.add(node) <TAB><TAB>for n in node.incoming: <TAB><TAB><TAB>if n not in previous: <MASK> <TAB>else: <TAB><TAB>return None <TAB>node = finish <TAB>path = collections.deque() <TAB>while node: <TAB><TAB>path.appendleft(node) <TAB><TAB>node = previous[node] <TAB>return path "
"<TAB><TAB><TAB><TAB>previous[n] = node <TAB><TAB>queue.extend(node.incoming)"
"<TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <TAB><TAB>seen.add(node) <TAB><TAB>for n in node.incoming: <TAB><TAB><TAB>if n not in previous: <TAB><TAB><TAB><TAB>previous[n] =<MASK>"
"def FindShortestPathToNode(self, start, finish, blocked): <TAB>"""Find a shortest path from start to finish, going backwards. <TAB>Args: <TAB> start: The node to start at. If this node appears in blocked, we can't <TAB><TAB>reach finish (unless start==finish). <TAB> finish: The node we're trying to reach. This node is always considered <TAB><TAB>reachable, even if it appears in blocked. <TAB> blocked: A set of nodes we're not allowed to traverse. <TAB>Returns: <TAB> An iterable over nodes, representing the shortest path (as <TAB> [start, ..., finish]), or None if no path exists. <TAB>""" <TAB>queue = collections.deque([start]) <TAB>previous = {start: None} <TAB>seen = set() <TAB>while queue: <TAB><TAB>node = queue.popleft() <TAB><TAB>if node is finish: <TAB><TAB><TAB>break <TAB><TAB>if node in seen or node in blocked: <TAB><TAB><TAB>continue <TAB><TAB>seen.add(node) <TAB><TAB>for n in node.incoming: <TAB><TAB><TAB>if n not in previous: <TAB><TAB><TAB><TAB>previous[n] =<MASK> <TAB>else: <TAB><TAB>return None <TAB>node = finish <TAB>path = collections.deque() <TAB>while node: <TAB><TAB>path.appendleft(node) <TAB><TAB>node = previous[node] <TAB>return path "
"node <TAB><TAB>queue.extend(node.incoming)"
"def set_field(cls, ofproto, action_str): <TAB>try: <TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value[:m]) <TAB><TAB><TAB>fieldarg["mask"] = str_to_int(value[m + 1 :]) <TAB><TAB>else: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value) <TAB>except Exception: <TAB><TAB>raise ryu.exception.OFPInvalidActionString(action_str=action_str) <TAB>return dict(OFPActionSetField={"field": {"OXMTlv": fieldarg}}) "
"<TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value[:m]) <TAB><TAB><TAB>fieldarg["mask"] = str_to_int(value[m + 1 :]) <TAB><TAB>else: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value)"
"<TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] =<MASK>"
"def set_field(cls, ofproto, action_str): <TAB>try: <TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] =<MASK> <TAB>except Exception: <TAB><TAB>raise ryu.exception.OFPInvalidActionString(action_str=action_str) <TAB>return dict(OFPActionSetField={"field": {"OXMTlv": fieldarg}}) "
"str_to_int(value[:m]) <TAB><TAB><TAB>fieldarg["mask"] = str_to_int(value[m + 1 :]) <TAB><TAB>else: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value)"
"<TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value[:m]) <TAB><TAB><TAB>fieldarg["mask"] = str_to_int(value[m + 1 :]) <MASK>"
"def set_field(cls, ofproto, action_str): <TAB>try: <TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value[:m]) <TAB><TAB><TAB>fieldarg["mask"] = str_to_int(value[m + 1 :]) <MASK> <TAB>except Exception: <TAB><TAB>raise ryu.exception.OFPInvalidActionString(action_str=action_str) <TAB>return dict(OFPActionSetField={"field": {"OXMTlv": fieldarg}}) "
"<TAB><TAB>else: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value)"
"<TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value[:m]) <TAB><TAB><TAB>fieldarg["mask"] = str_to_int(value[m + 1 :]) <TAB><TAB>else: <MASK>"
"def set_field(cls, ofproto, action_str): <TAB>try: <TAB><TAB>assert action_str.startswith("set_field:") <TAB><TAB>value, key = action_str[len("set_field:") :].split("->", 1) <TAB><TAB>fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) <TAB><TAB>m = value.find("/") <TAB><TAB>if m >= 0: <TAB><TAB><TAB>fieldarg["value"] = str_to_int(value[:m]) <TAB><TAB><TAB>fieldarg["mask"] = str_to_int(value[m + 1 :]) <TAB><TAB>else: <MASK> <TAB>except Exception: <TAB><TAB>raise ryu.exception.OFPInvalidActionString(action_str=action_str) <TAB>return dict(OFPActionSetField={"field": {"OXMTlv": fieldarg}}) "
"<TAB><TAB><TAB>fieldarg["value"] = str_to_int(value)"
"def _merge_fn(strategy, per_replica_cov_matrix_update, per_replica_data_vector_update): <TAB>"""Merge the per-replica-updates.""" <TAB># Reduce the per-replica-updates using SUM. <TAB># pylint: disable=cell-var-from-loop <TAB>updates_and_vars = [ <TAB><TAB>(per_replica_cov_matrix_update, self._cov_matrix_list[k]), <TAB><TAB>(per_replica_data_vector_update, self._data_vector_list[k]), <TAB>] <TAB>reduced_updates = strategy.extended.batch_reduce_to( <TAB><TAB>tf.distribute.ReduceOp.SUM, updates_and_vars <TAB>) <TAB># Update the model variables. <TAB>self._cov_matrix_list[k].assign_add(reduced_updates[0]) <TAB>self._data_vector_list[k].assign_add(reduced_updates[1]) <TAB># Compute the eigendecomposition, if needed. <TAB>if self._use_eigendecomp: <TAB><TAB>eig_vals, eig_matrix = tf.linalg.eigh(self._cov_matrix_list[k]) <TAB><TAB>self._eig_vals_list[k].assign(eig_vals) <TAB><TAB>self._eig_matrix_list[k].assign(eig_matrix) "
"<TAB><TAB>eig_vals, eig_matrix = tf.linalg.eigh(self._cov_matrix_list[k]) <TAB><TAB>self._eig_vals_list[k].assign(eig_vals) <TAB><TAB>self._eig_matrix_list[k].assign(eig_matrix)"
"<TAB><TAB>eig_vals, eig_matrix =<MASK>"
"def _merge_fn(strategy, per_replica_cov_matrix_update, per_replica_data_vector_update): <TAB>"""Merge the per-replica-updates.""" <TAB># Reduce the per-replica-updates using SUM. <TAB># pylint: disable=cell-var-from-loop <TAB>updates_and_vars = [ <TAB><TAB>(per_replica_cov_matrix_update, self._cov_matrix_list[k]), <TAB><TAB>(per_replica_data_vector_update, self._data_vector_list[k]), <TAB>] <TAB>reduced_updates = strategy.extended.batch_reduce_to( <TAB><TAB>tf.distribute.ReduceOp.SUM, updates_and_vars <TAB>) <TAB># Update the model variables. <TAB>self._cov_matrix_list[k].assign_add(reduced_updates[0]) <TAB>self._data_vector_list[k].assign_add(reduced_updates[1]) <TAB># Compute the eigendecomposition, if needed. <TAB>if self._use_eigendecomp: <TAB><TAB>eig_vals, eig_matrix =<MASK> "
"tf.linalg.eigh(self._cov_matrix_list[k]) <TAB><TAB>self._eig_vals_list[k].assign(eig_vals) <TAB><TAB>self._eig_matrix_list[k].assign(eig_matrix)"
"<TAB><TAB>eig_vals, eig_matrix = tf.linalg.eigh(self._cov_matrix_list[k]) <MASK>"
"def _merge_fn(strategy, per_replica_cov_matrix_update, per_replica_data_vector_update): <TAB>"""Merge the per-replica-updates.""" <TAB># Reduce the per-replica-updates using SUM. <TAB># pylint: disable=cell-var-from-loop <TAB>updates_and_vars = [ <TAB><TAB>(per_replica_cov_matrix_update, self._cov_matrix_list[k]), <TAB><TAB>(per_replica_data_vector_update, self._data_vector_list[k]), <TAB>] <TAB>reduced_updates = strategy.extended.batch_reduce_to( <TAB><TAB>tf.distribute.ReduceOp.SUM, updates_and_vars <TAB>) <TAB># Update the model variables. <TAB>self._cov_matrix_list[k].assign_add(reduced_updates[0]) <TAB>self._data_vector_list[k].assign_add(reduced_updates[1]) <TAB># Compute the eigendecomposition, if needed. <TAB>if self._use_eigendecomp: <TAB><TAB>eig_vals, eig_matrix = tf.linalg.eigh(self._cov_matrix_list[k]) <MASK> "
"<TAB><TAB>self._eig_vals_list[k].assign(eig_vals) <TAB><TAB>self._eig_matrix_list[k].assign(eig_matrix)"
"<TAB><TAB>eig_vals, eig_matrix = tf.linalg.eigh(self._cov_matrix_list[k]) <TAB><TAB>self._eig_vals_list[k].assign(eig_vals) <MASK>"
"def _merge_fn(strategy, per_replica_cov_matrix_update, per_replica_data_vector_update): <TAB>"""Merge the per-replica-updates.""" <TAB># Reduce the per-replica-updates using SUM. <TAB># pylint: disable=cell-var-from-loop <TAB>updates_and_vars = [ <TAB><TAB>(per_replica_cov_matrix_update, self._cov_matrix_list[k]), <TAB><TAB>(per_replica_data_vector_update, self._data_vector_list[k]), <TAB>] <TAB>reduced_updates = strategy.extended.batch_reduce_to( <TAB><TAB>tf.distribute.ReduceOp.SUM, updates_and_vars <TAB>) <TAB># Update the model variables. <TAB>self._cov_matrix_list[k].assign_add(reduced_updates[0]) <TAB>self._data_vector_list[k].assign_add(reduced_updates[1]) <TAB># Compute the eigendecomposition, if needed. <TAB>if self._use_eigendecomp: <TAB><TAB>eig_vals, eig_matrix = tf.linalg.eigh(self._cov_matrix_list[k]) <TAB><TAB>self._eig_vals_list[k].assign(eig_vals) <MASK> "
"<TAB><TAB>self._eig_matrix_list[k].assign(eig_matrix)"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile) <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile)"
"<TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <MASK>"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <MASK> <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile)"
"<TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <TAB><TAB>extended_profile = all_profiles[self.extends] <MASK>"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <TAB><TAB>extended_profile = all_profiles[self.extends] <MASK> <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile)"
"<TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <MASK>"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <MASK> <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB>self.extend_by(extended_profile)"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg) <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile) <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg)"
"<TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <MASK>"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <MASK> <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile) <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg)"
"<TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <MASK>"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <MASK> <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile) <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg)"
"<TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <MASK>"
"def resolve(self, all_profiles, controls_manager=None): <TAB>if self.resolved: <TAB><TAB>return <TAB>self.resolve_controls(controls_manager) <TAB>self.resolved_selections = set(self.selected) <TAB>if self.extends: <TAB><TAB>if self.extends not in all_profiles: <TAB><TAB><TAB>msg = ( <TAB><TAB><TAB><TAB>"Profile {name} extends profile {extended}, but " <TAB><TAB><TAB><TAB>"only profiles {known_profiles} are available for resolution.".format( <TAB><TAB><TAB><TAB><TAB>name=self.id_, <TAB><TAB><TAB><TAB><TAB>extended=self.extends, <TAB><TAB><TAB><TAB><TAB>known_profiles=list(all_profiles.keys()), <TAB><TAB><TAB><TAB>) <MASK> <TAB><TAB>extended_profile = all_profiles[self.extends] <TAB><TAB>extended_profile.resolve(all_profiles, controls_manager) <TAB><TAB>self.extend_by(extended_profile) <TAB>for uns in self.unselected: <TAB><TAB>self.resolved_selections.discard(uns) <TAB>self.unselected = [] <TAB>self.extends = None <TAB>self.selected = sorted(self.resolved_selections) <TAB>self.resolved = True "
"<TAB><TAB><TAB>) <TAB><TAB><TAB>raise RuntimeError(msg)"
"def get( <TAB>self, <TAB>resource_group_name, # type: str <TAB>load_balancer_name, # type: str <TAB>backend_address_pool_name, # type: str <TAB>**kwargs # type: Any ): <TAB># type: (...) -> "_models.BackendAddressPool" <TAB>"""Gets load balancer backend address pool. <TAB>:param resource_group_name: The name of the resource group. <TAB>:type resource_group_name: str <TAB>:param load_balancer_name: The name of the load balancer. <TAB>:type load_balancer_name: str <TAB>:param backend_address_pool_name: The name of the backend address pool. <TAB>:type backend_address_pool_name: str <TAB>:keyword callable cls: A custom type or function that will be passed the direct response <TAB>:return: BackendAddressPool, or the result of cls(response) <TAB>:rtype: ~azure.mgmt.network.v2018_11_01.models.BackendAddressPool <TAB>:raises: ~azure.core.exceptions.HttpResponseError <TAB>""" <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.BackendAddressPool"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2018-11-01" <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self.get.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"loadBalancerName": self._serialize.url( <TAB><TAB><TAB>"load_balancer_name", load_balancer_name, "str" <TAB><TAB>), <TAB><TAB>"backendAddressPoolName": self._serialize.url( <TAB><TAB><TAB>"backend_address_pool_name", backend_address_pool_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200]: <TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>raise HttpResponseError(response=response, error_format=ARMErrorFormat) <TAB>deserialized = self._deserialize("BackendAddressPool", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"<TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>raise HttpResponseError(response=response, error_format=ARMErrorFormat)"
"<TAB><TAB>map_error( <MASK>"
"def get( <TAB>self, <TAB>resource_group_name, # type: str <TAB>load_balancer_name, # type: str <TAB>backend_address_pool_name, # type: str <TAB>**kwargs # type: Any ): <TAB># type: (...) -> "_models.BackendAddressPool" <TAB>"""Gets load balancer backend address pool. <TAB>:param resource_group_name: The name of the resource group. <TAB>:type resource_group_name: str <TAB>:param load_balancer_name: The name of the load balancer. <TAB>:type load_balancer_name: str <TAB>:param backend_address_pool_name: The name of the backend address pool. <TAB>:type backend_address_pool_name: str <TAB>:keyword callable cls: A custom type or function that will be passed the direct response <TAB>:return: BackendAddressPool, or the result of cls(response) <TAB>:rtype: ~azure.mgmt.network.v2018_11_01.models.BackendAddressPool <TAB>:raises: ~azure.core.exceptions.HttpResponseError <TAB>""" <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.BackendAddressPool"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2018-11-01" <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self.get.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"loadBalancerName": self._serialize.url( <TAB><TAB><TAB>"load_balancer_name", load_balancer_name, "str" <TAB><TAB>), <TAB><TAB>"backendAddressPoolName": self._serialize.url( <TAB><TAB><TAB>"backend_address_pool_name", backend_address_pool_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200]: <TAB><TAB>map_error( <MASK> <TAB>deserialized = self._deserialize("BackendAddressPool", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"<TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>raise HttpResponseError(response=response, error_format=ARMErrorFormat)"
"<TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <MASK>"
"def get( <TAB>self, <TAB>resource_group_name, # type: str <TAB>load_balancer_name, # type: str <TAB>backend_address_pool_name, # type: str <TAB>**kwargs # type: Any ): <TAB># type: (...) -> "_models.BackendAddressPool" <TAB>"""Gets load balancer backend address pool. <TAB>:param resource_group_name: The name of the resource group. <TAB>:type resource_group_name: str <TAB>:param load_balancer_name: The name of the load balancer. <TAB>:type load_balancer_name: str <TAB>:param backend_address_pool_name: The name of the backend address pool. <TAB>:type backend_address_pool_name: str <TAB>:keyword callable cls: A custom type or function that will be passed the direct response <TAB>:return: BackendAddressPool, or the result of cls(response) <TAB>:rtype: ~azure.mgmt.network.v2018_11_01.models.BackendAddressPool <TAB>:raises: ~azure.core.exceptions.HttpResponseError <TAB>""" <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.BackendAddressPool"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2018-11-01" <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self.get.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"loadBalancerName": self._serialize.url( <TAB><TAB><TAB>"load_balancer_name", load_balancer_name, "str" <TAB><TAB>), <TAB><TAB>"backendAddressPoolName": self._serialize.url( <TAB><TAB><TAB>"backend_address_pool_name", backend_address_pool_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200]: <TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <MASK> <TAB>deserialized = self._deserialize("BackendAddressPool", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"<TAB><TAB>raise HttpResponseError(response=response, error_format=ARMErrorFormat)"
"<TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>raise<MASK>"
"def get( <TAB>self, <TAB>resource_group_name, # type: str <TAB>load_balancer_name, # type: str <TAB>backend_address_pool_name, # type: str <TAB>**kwargs # type: Any ): <TAB># type: (...) -> "_models.BackendAddressPool" <TAB>"""Gets load balancer backend address pool. <TAB>:param resource_group_name: The name of the resource group. <TAB>:type resource_group_name: str <TAB>:param load_balancer_name: The name of the load balancer. <TAB>:type load_balancer_name: str <TAB>:param backend_address_pool_name: The name of the backend address pool. <TAB>:type backend_address_pool_name: str <TAB>:keyword callable cls: A custom type or function that will be passed the direct response <TAB>:return: BackendAddressPool, or the result of cls(response) <TAB>:rtype: ~azure.mgmt.network.v2018_11_01.models.BackendAddressPool <TAB>:raises: ~azure.core.exceptions.HttpResponseError <TAB>""" <TAB>cls = kwargs.pop("cls", None) # type: ClsType["_models.BackendAddressPool"] <TAB>error_map = { <TAB><TAB>401: ClientAuthenticationError, <TAB><TAB>404: ResourceNotFoundError, <TAB><TAB>409: ResourceExistsError, <TAB>} <TAB>error_map.update(kwargs.pop("error_map", {})) <TAB>api_version = "2018-11-01" <TAB>accept = "application/json" <TAB># Construct URL <TAB>url = self.get.metadata["url"] # type: ignore <TAB>path_format_arguments = { <TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB>), <TAB><TAB>"loadBalancerName": self._serialize.url( <TAB><TAB><TAB>"load_balancer_name", load_balancer_name, "str" <TAB><TAB>), <TAB><TAB>"backendAddressPoolName": self._serialize.url( <TAB><TAB><TAB>"backend_address_pool_name", backend_address_pool_name, "str" <TAB><TAB>), <TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB>), <TAB>} <TAB>url = self._client.format_url(url, **path_format_arguments) <TAB># Construct parameters <TAB>query_parameters = {} # type: Dict[str, Any] <TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB>"api_version", api_version, "str" <TAB>) <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <TAB>response = pipeline_response.http_response <TAB>if response.status_code not in [200]: <TAB><TAB>map_error( <TAB><TAB><TAB>status_code=response.status_code, response=response, error_map=error_map <TAB><TAB>) <TAB><TAB>raise<MASK> <TAB>deserialized = self._deserialize("BackendAddressPool", pipeline_response) <TAB>if cls: <TAB><TAB>return cls(pipeline_response, deserialized, {}) <TAB>return deserialized "
"HttpResponseError(response=response, error_format=ARMErrorFormat)"
"def __init__(self, output_path="output", weight_path=None, use_adjust_brightness=True): <TAB>self.output_path = output_path <TAB>self.input_size = (256, 256) <TAB>self.use_adjust_brightness = use_adjust_brightness <TAB>if weight_path is None: <TAB><TAB>vox_cpk_weight_url = ( <TAB><TAB><TAB>"https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams" <TAB><TAB>) <TAB><TAB>weight_path = get_path_from_url(vox_cpk_weight_url) <TAB>self.weight_path = weight_path <TAB>self.generator = self.load_checkpoints() <TAB>self.transform = T.Compose( <TAB><TAB>[ <TAB><TAB><TAB>ResizeToScale((256, 256), 32), <TAB><TAB><TAB>T.Transpose(), <TAB><TAB><TAB>T.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]), <TAB><TAB>] <TAB>) "
"<TAB><TAB>vox_cpk_weight_url = ( <TAB><TAB><TAB>"https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams" <TAB><TAB>) <TAB><TAB>weight_path = get_path_from_url(vox_cpk_weight_url)"
"<TAB><TAB>vox_cpk_weight_url = ( <MASK>"
"def __init__(self, output_path="output", weight_path=None, use_adjust_brightness=True): <TAB>self.output_path = output_path <TAB>self.input_size = (256, 256) <TAB>self.use_adjust_brightness = use_adjust_brightness <TAB>if weight_path is None: <TAB><TAB>vox_cpk_weight_url = ( <MASK> <TAB>self.weight_path = weight_path <TAB>self.generator = self.load_checkpoints() <TAB>self.transform = T.Compose( <TAB><TAB>[ <TAB><TAB><TAB>ResizeToScale((256, 256), 32), <TAB><TAB><TAB>T.Transpose(), <TAB><TAB><TAB>T.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]), <TAB><TAB>] <TAB>) "
"<TAB><TAB><TAB>"https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams" <TAB><TAB>) <TAB><TAB>weight_path = get_path_from_url(vox_cpk_weight_url)"
"<TAB><TAB>vox_cpk_weight_url = ( <TAB><TAB><TAB>"https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams" <TAB><TAB>) <MASK>"
"def __init__(self, output_path="output", weight_path=None, use_adjust_brightness=True): <TAB>self.output_path = output_path <TAB>self.input_size = (256, 256) <TAB>self.use_adjust_brightness = use_adjust_brightness <TAB>if weight_path is None: <TAB><TAB>vox_cpk_weight_url = ( <TAB><TAB><TAB>"https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams" <TAB><TAB>) <MASK> <TAB>self.weight_path = weight_path <TAB>self.generator = self.load_checkpoints() <TAB>self.transform = T.Compose( <TAB><TAB>[ <TAB><TAB><TAB>ResizeToScale((256, 256), 32), <TAB><TAB><TAB>T.Transpose(), <TAB><TAB><TAB>T.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]), <TAB><TAB>] <TAB>) "
"<TAB><TAB>weight_path = get_path_from_url(vox_cpk_weight_url)"
"<TAB><TAB>vox_cpk_weight_url = ( <TAB><TAB><TAB>"https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams" <TAB><TAB>) <TAB><TAB>weight_path =<MASK>"
"def __init__(self, output_path="output", weight_path=None, use_adjust_brightness=True): <TAB>self.output_path = output_path <TAB>self.input_size = (256, 256) <TAB>self.use_adjust_brightness = use_adjust_brightness <TAB>if weight_path is None: <TAB><TAB>vox_cpk_weight_url = ( <TAB><TAB><TAB>"https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams" <TAB><TAB>) <TAB><TAB>weight_path =<MASK> <TAB>self.weight_path = weight_path <TAB>self.generator = self.load_checkpoints() <TAB>self.transform = T.Compose( <TAB><TAB>[ <TAB><TAB><TAB>ResizeToScale((256, 256), 32), <TAB><TAB><TAB>T.Transpose(), <TAB><TAB><TAB>T.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]), <TAB><TAB>] <TAB>) "
"get_path_from_url(vox_cpk_weight_url)"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:] <TAB>else: <TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr) <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"<TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:]"
"<TAB><TAB>try: <TAB><TAB><TAB>end =<MASK>"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end =<MASK> <TAB>else: <TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr) <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:]"
"<TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <MASK>"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <MASK> <TAB>else: <TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr) <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"<TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:]"
"<TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type =<MASK>"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type =<MASK> <TAB>else: <TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr) <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"descr[1:end] <TAB><TAB>descr = descr[end:]"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:] <TAB>else: <TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr) <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"<TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr)"
"<TAB><TAB>global code_to_type_name <MASK>"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:] <TAB>else: <TAB><TAB>global code_to_type_name <MASK> <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"<TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr)"
"<TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except<MASK>"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:] <TAB>else: <TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except<MASK> <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"KeyError: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: %s" % descr)"
"<TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise<MASK>"
"def eat_descriptor(descr): <TAB>""" <TAB>Read head of a field/method descriptor. Returns a pair of strings, where <TAB>the first one is a human-readable string representation of the first found <TAB>type, and the second one is the tail of the parameter. <TAB>""" <TAB>array_dim = 0 <TAB>while descr[0] == "[": <TAB><TAB>array_dim += 1 <TAB><TAB>descr = descr[1:] <TAB>if descr[0] == "L": <TAB><TAB>try: <TAB><TAB><TAB>end = descr.find(";") <TAB><TAB>except: <TAB><TAB><TAB>raise ParserError("Not a valid descriptor string: " + descr) <TAB><TAB>type = descr[1:end] <TAB><TAB>descr = descr[end:] <TAB>else: <TAB><TAB>global code_to_type_name <TAB><TAB>try: <TAB><TAB><TAB>type = code_to_type_name[descr[0]] <TAB><TAB>except KeyError: <TAB><TAB><TAB>raise<MASK> <TAB>return (type.replace("/", ".") + array_dim * "[]", descr[1:]) "
"ParserError("Not a valid descriptor string: %s" % descr)"
"def test_hash(self): <TAB>self.fs.makedir("foo").writebytes("hashme.txt", b"foobar" * 1024) <TAB>self.assertEqual( <TAB><TAB>self.fs.hash("foo/hashme.txt", "md5"), "9fff4bb103ab8ce4619064109c54cb9c" <TAB>) <TAB>with self.assertRaises(errors.UnsupportedHash): <TAB><TAB>self.fs.hash("foo/hashme.txt", "nohash") <TAB>with self.fs.opendir("foo") as foo_fs: <TAB><TAB>self.assertEqual( <TAB><TAB><TAB>foo_fs.hash("hashme.txt", "md5"), "9fff4bb103ab8ce4619064109c54cb9c" <TAB><TAB>) "
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>foo_fs.hash("hashme.txt", "md5"), "9fff4bb103ab8ce4619064109c54cb9c" <TAB><TAB>)"
"<MASK>"
"def test_hash(self): <TAB>self.fs.makedir("foo").writebytes("hashme.txt", b"foobar" * 1024) <TAB>self.assertEqual( <TAB><TAB>self.fs.hash("foo/hashme.txt", "md5"), "9fff4bb103ab8ce4619064109c54cb9c" <TAB>) <TAB>with self.assertRaises(errors.UnsupportedHash): <TAB><TAB>self.fs.hash("foo/hashme.txt", "nohash") <TAB>with self.fs.opendir("foo") as foo_fs: <MASK> "
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>foo_fs.hash("hashme.txt", "md5"), "9fff4bb103ab8ce4619064109c54cb9c" <TAB><TAB>)"
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>foo_fs.hash("hashme.txt", "md5"),<MASK>"
"def test_hash(self): <TAB>self.fs.makedir("foo").writebytes("hashme.txt", b"foobar" * 1024) <TAB>self.assertEqual( <TAB><TAB>self.fs.hash("foo/hashme.txt", "md5"), "9fff4bb103ab8ce4619064109c54cb9c" <TAB>) <TAB>with self.assertRaises(errors.UnsupportedHash): <TAB><TAB>self.fs.hash("foo/hashme.txt", "nohash") <TAB>with self.fs.opendir("foo") as foo_fs: <TAB><TAB>self.assertEqual( <TAB><TAB><TAB>foo_fs.hash("hashme.txt", "md5"),<MASK> "
""9fff4bb103ab8ce4619064109c54cb9c" <TAB><TAB>)"
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>foo_fs.hash("hashme.txt", "md5"),<MASK>"
"def test_hash(self): <TAB>self.fs.makedir("foo").writebytes("hashme.txt", b"foobar" * 1024) <TAB>self.assertEqual( <TAB><TAB>self.fs.hash("foo/hashme.txt", "md5"), "9fff4bb103ab8ce4619064109c54cb9c" <TAB>) <TAB>with self.assertRaises(errors.UnsupportedHash): <TAB><TAB>self.fs.hash("foo/hashme.txt", "nohash") <TAB>with self.fs.opendir("foo") as foo_fs: <TAB><TAB>self.assertEqual( <TAB><TAB><TAB>foo_fs.hash("hashme.txt", "md5"),<MASK> "
""9fff4bb103ab8ce4619064109c54cb9c" <TAB><TAB>)"
"def recolorize(self): <TAB>self.after_id = None <TAB>if not self.delegate: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("no delegate") <TAB><TAB>return <TAB>if not self.allow_colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("auto colorizing is off") <TAB><TAB>return <TAB>if self.colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("already colorizing") <TAB><TAB>return <TAB>try: <TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <TAB><TAB>self.recolorize_main() <TAB><TAB>t1 = time.perf_counter() <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("%.3f seconds" % (t1 - t0)) <TAB>finally: <TAB><TAB>self.colorizing = False <TAB>if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"): <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("reschedule colorizing") <TAB><TAB>self.after_id = self.after(1, self.recolorize) <TAB>if self.close_when_done: <TAB><TAB>top = self.close_when_done <TAB><TAB>self.close_when_done = None <TAB><TAB>top.destroy() "
"<TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <TAB><TAB>self.recolorize_main() <TAB><TAB>t1 = time.perf_counter() <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("%.3f seconds" % (t1 - t0))"
"<TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <MASK>"
"def recolorize(self): <TAB>self.after_id = None <TAB>if not self.delegate: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("no delegate") <TAB><TAB>return <TAB>if not self.allow_colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("auto colorizing is off") <TAB><TAB>return <TAB>if self.colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("already colorizing") <TAB><TAB>return <TAB>try: <TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <MASK> <TAB>finally: <TAB><TAB>self.colorizing = False <TAB>if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"): <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("reschedule colorizing") <TAB><TAB>self.after_id = self.after(1, self.recolorize) <TAB>if self.close_when_done: <TAB><TAB>top = self.close_when_done <TAB><TAB>self.close_when_done = None <TAB><TAB>top.destroy() "
"<TAB><TAB>self.recolorize_main() <TAB><TAB>t1 = time.perf_counter() <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("%.3f seconds" % (t1 - t0))"
"<TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <TAB><TAB>self.recolorize_main() <TAB><TAB>t1 = time.perf_counter() <TAB><TAB>if<MASK>"
"def recolorize(self): <TAB>self.after_id = None <TAB>if not self.delegate: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("no delegate") <TAB><TAB>return <TAB>if not self.allow_colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("auto colorizing is off") <TAB><TAB>return <TAB>if self.colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("already colorizing") <TAB><TAB>return <TAB>try: <TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <TAB><TAB>self.recolorize_main() <TAB><TAB>t1 = time.perf_counter() <TAB><TAB>if<MASK> <TAB>finally: <TAB><TAB>self.colorizing = False <TAB>if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"): <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("reschedule colorizing") <TAB><TAB>self.after_id = self.after(1, self.recolorize) <TAB>if self.close_when_done: <TAB><TAB>top = self.close_when_done <TAB><TAB>self.close_when_done = None <TAB><TAB>top.destroy() "
"DEBUG: <TAB><TAB><TAB>print("%.3f seconds" % (t1 - t0))"
"<TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <TAB><TAB>self.recolorize_main() <TAB><TAB>t1 = time.perf_counter() <TAB><TAB>if DEBUG: <MASK>"
"def recolorize(self): <TAB>self.after_id = None <TAB>if not self.delegate: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("no delegate") <TAB><TAB>return <TAB>if not self.allow_colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("auto colorizing is off") <TAB><TAB>return <TAB>if self.colorizing: <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("already colorizing") <TAB><TAB>return <TAB>try: <TAB><TAB>self.stop_colorizing = False <TAB><TAB>self.colorizing = True <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("colorizing...") <TAB><TAB>t0 = time.perf_counter() <TAB><TAB>self.recolorize_main() <TAB><TAB>t1 = time.perf_counter() <TAB><TAB>if DEBUG: <MASK> <TAB>finally: <TAB><TAB>self.colorizing = False <TAB>if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"): <TAB><TAB>if DEBUG: <TAB><TAB><TAB>print("reschedule colorizing") <TAB><TAB>self.after_id = self.after(1, self.recolorize) <TAB>if self.close_when_done: <TAB><TAB>top = self.close_when_done <TAB><TAB>self.close_when_done = None <TAB><TAB>top.destroy() "
"<TAB><TAB><TAB>print("%.3f seconds" % (t1 - t0))"
"def __init__(self, buf, batch_size, compression_type=None): <TAB>if compression_type is not None: <TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <TAB><TAB>checker, encoder, attributes = self._COMPRESSORS[compression_type] <TAB><TAB>assert checker(), "Compression Libraries Not Found" <TAB><TAB>self._compressor = encoder <TAB><TAB>self._compression_attributes = attributes <TAB>else: <TAB><TAB>self._compressor = None <TAB><TAB>self._compression_attributes = None <TAB>self._buffer = buf <TAB># Init MessageSetSize to 0 -- update on close <TAB>self._buffer.seek(0) <TAB>self._buffer.write(Int32.encode(0)) <TAB>self._batch_size = batch_size <TAB>self._closed = False <TAB>self._messages = 0 "
"<TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <TAB><TAB>checker, encoder, attributes = self._COMPRESSORS[compression_type] <TAB><TAB>assert checker(), "Compression Libraries Not Found" <TAB><TAB>self._compressor = encoder <TAB><TAB>self._compression_attributes = attributes"
"<TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <MASK>"
"def __init__(self, buf, batch_size, compression_type=None): <TAB>if compression_type is not None: <TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <MASK> <TAB>else: <TAB><TAB>self._compressor = None <TAB><TAB>self._compression_attributes = None <TAB>self._buffer = buf <TAB># Init MessageSetSize to 0 -- update on close <TAB>self._buffer.seek(0) <TAB>self._buffer.write(Int32.encode(0)) <TAB>self._batch_size = batch_size <TAB>self._closed = False <TAB>self._messages = 0 "
"<TAB><TAB>checker, encoder, attributes = self._COMPRESSORS[compression_type] <TAB><TAB>assert checker(), "Compression Libraries Not Found" <TAB><TAB>self._compressor = encoder <TAB><TAB>self._compression_attributes = attributes"
"<TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <TAB><TAB>checker, encoder, attributes = self._COMPRESSORS[compression_type] <TAB><TAB>assert checker(), "Compression Libraries Not<MASK>"
"def __init__(self, buf, batch_size, compression_type=None): <TAB>if compression_type is not None: <TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <TAB><TAB>checker, encoder, attributes = self._COMPRESSORS[compression_type] <TAB><TAB>assert checker(), "Compression Libraries Not<MASK> <TAB>else: <TAB><TAB>self._compressor = None <TAB><TAB>self._compression_attributes = None <TAB>self._buffer = buf <TAB># Init MessageSetSize to 0 -- update on close <TAB>self._buffer.seek(0) <TAB>self._buffer.write(Int32.encode(0)) <TAB>self._batch_size = batch_size <TAB>self._closed = False <TAB>self._messages = 0 "
"Found" <TAB><TAB>self._compressor = encoder <TAB><TAB>self._compression_attributes = attributes"
"<TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <TAB><TAB>checker, encoder, attributes = self._COMPRESSORS[compression_type] <TAB><TAB>assert checker(), "Compression Libraries Not Found" <TAB><TAB>self._compressor<MASK>"
"def __init__(self, buf, batch_size, compression_type=None): <TAB>if compression_type is not None: <TAB><TAB>assert compression_type in self._COMPRESSORS, "Unrecognized compression type" <TAB><TAB>checker, encoder, attributes = self._COMPRESSORS[compression_type] <TAB><TAB>assert checker(), "Compression Libraries Not Found" <TAB><TAB>self._compressor<MASK> <TAB>else: <TAB><TAB>self._compressor = None <TAB><TAB>self._compression_attributes = None <TAB>self._buffer = buf <TAB># Init MessageSetSize to 0 -- update on close <TAB>self._buffer.seek(0) <TAB>self._buffer.write(Int32.encode(0)) <TAB>self._batch_size = batch_size <TAB>self._closed = False <TAB>self._messages = 0 "
"= encoder <TAB><TAB>self._compression_attributes = attributes"
"def catalogs_ajax(env, compare): <TAB>"""Server data to catalogs as JSON to Jquery datatables""" <TAB>draw = int(request.args.get("draw", 0)) <TAB>start = int(request.args.get("start", 0)) <TAB>length = int(request.args.get("length", app.config["NORMAL_TABLE_COUNT"])) <TAB>paging_args = {"limit": length, "offset": start} <TAB>search_arg = request.args.get("search[value]") <TAB>order_column = int(request.args.get("order[0][column]", 0)) <TAB>order_filter = CATALOGS_COLUMNS[order_column].get( <TAB><TAB>"filter", CATALOGS_COLUMNS[order_column]["attr"] <TAB>) <TAB>order_dir = request.args.get("order[0][dir]", "asc") <TAB>order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) <TAB>envs = environments() <TAB>check_env(env, envs) <TAB>query = AndOperator() <TAB>if env != "*": <TAB><TAB>query.add(EqualsOperator("catalog_environment", env)) <TAB>if search_arg: <TAB><TAB>query.add(RegexOperator("certname", r"%s" % search_arg)) <TAB>query.add(NullOperator("catalog_timestamp", False)) <TAB>nodes = get_or_abort( <TAB><TAB>puppetdb.nodes, <TAB><TAB>query=query, <TAB><TAB>include_total=True, <TAB><TAB>order_by=order_args, <TAB><TAB>**paging_args <TAB>) <TAB>catalog_list = [] <TAB>total = None <TAB>for node in nodes: <TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"certname": node.name, <TAB><TAB><TAB><TAB>"catalog_timestamp": node.catalog_timestamp, <TAB><TAB><TAB><TAB>"form": compare, <TAB><TAB><TAB>} <TAB><TAB>) <TAB>if total is None: <TAB><TAB>total = 0 <TAB>return render_template( <TAB><TAB>"catalogs.json.tpl", <TAB><TAB>total=total, <TAB><TAB>total_filtered=total, <TAB><TAB>draw=draw, <TAB><TAB>columns=CATALOGS_COLUMNS, <TAB><TAB>catalogs=catalog_list, <TAB><TAB>envs=envs, <TAB><TAB>current_env=env, <TAB>) "
"<TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"certname": node.name, <TAB><TAB><TAB><TAB>"catalog_timestamp": node.catalog_timestamp, <TAB><TAB><TAB><TAB>"form": compare, <TAB><TAB><TAB>} <TAB><TAB>)"
"<TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <MASK>"
"def catalogs_ajax(env, compare): <TAB>"""Server data to catalogs as JSON to Jquery datatables""" <TAB>draw = int(request.args.get("draw", 0)) <TAB>start = int(request.args.get("start", 0)) <TAB>length = int(request.args.get("length", app.config["NORMAL_TABLE_COUNT"])) <TAB>paging_args = {"limit": length, "offset": start} <TAB>search_arg = request.args.get("search[value]") <TAB>order_column = int(request.args.get("order[0][column]", 0)) <TAB>order_filter = CATALOGS_COLUMNS[order_column].get( <TAB><TAB>"filter", CATALOGS_COLUMNS[order_column]["attr"] <TAB>) <TAB>order_dir = request.args.get("order[0][dir]", "asc") <TAB>order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) <TAB>envs = environments() <TAB>check_env(env, envs) <TAB>query = AndOperator() <TAB>if env != "*": <TAB><TAB>query.add(EqualsOperator("catalog_environment", env)) <TAB>if search_arg: <TAB><TAB>query.add(RegexOperator("certname", r"%s" % search_arg)) <TAB>query.add(NullOperator("catalog_timestamp", False)) <TAB>nodes = get_or_abort( <TAB><TAB>puppetdb.nodes, <TAB><TAB>query=query, <TAB><TAB>include_total=True, <TAB><TAB>order_by=order_args, <TAB><TAB>**paging_args <TAB>) <TAB>catalog_list = [] <TAB>total = None <TAB>for node in nodes: <TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <MASK> <TAB>if total is None: <TAB><TAB>total = 0 <TAB>return render_template( <TAB><TAB>"catalogs.json.tpl", <TAB><TAB>total=total, <TAB><TAB>total_filtered=total, <TAB><TAB>draw=draw, <TAB><TAB>columns=CATALOGS_COLUMNS, <TAB><TAB>catalogs=catalog_list, <TAB><TAB>envs=envs, <TAB><TAB>current_env=env, <TAB>) "
"<TAB><TAB><TAB><TAB>"certname": node.name, <TAB><TAB><TAB><TAB>"catalog_timestamp": node.catalog_timestamp, <TAB><TAB><TAB><TAB>"form": compare, <TAB><TAB><TAB>} <TAB><TAB>)"
"<TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"certname": node.name, <TAB><TAB><TAB><TAB>"catalog_timestamp":<MASK>"
"def catalogs_ajax(env, compare): <TAB>"""Server data to catalogs as JSON to Jquery datatables""" <TAB>draw = int(request.args.get("draw", 0)) <TAB>start = int(request.args.get("start", 0)) <TAB>length = int(request.args.get("length", app.config["NORMAL_TABLE_COUNT"])) <TAB>paging_args = {"limit": length, "offset": start} <TAB>search_arg = request.args.get("search[value]") <TAB>order_column = int(request.args.get("order[0][column]", 0)) <TAB>order_filter = CATALOGS_COLUMNS[order_column].get( <TAB><TAB>"filter", CATALOGS_COLUMNS[order_column]["attr"] <TAB>) <TAB>order_dir = request.args.get("order[0][dir]", "asc") <TAB>order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) <TAB>envs = environments() <TAB>check_env(env, envs) <TAB>query = AndOperator() <TAB>if env != "*": <TAB><TAB>query.add(EqualsOperator("catalog_environment", env)) <TAB>if search_arg: <TAB><TAB>query.add(RegexOperator("certname", r"%s" % search_arg)) <TAB>query.add(NullOperator("catalog_timestamp", False)) <TAB>nodes = get_or_abort( <TAB><TAB>puppetdb.nodes, <TAB><TAB>query=query, <TAB><TAB>include_total=True, <TAB><TAB>order_by=order_args, <TAB><TAB>**paging_args <TAB>) <TAB>catalog_list = [] <TAB>total = None <TAB>for node in nodes: <TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"certname": node.name, <TAB><TAB><TAB><TAB>"catalog_timestamp":<MASK> <TAB>if total is None: <TAB><TAB>total = 0 <TAB>return render_template( <TAB><TAB>"catalogs.json.tpl", <TAB><TAB>total=total, <TAB><TAB>total_filtered=total, <TAB><TAB>draw=draw, <TAB><TAB>columns=CATALOGS_COLUMNS, <TAB><TAB>catalogs=catalog_list, <TAB><TAB>envs=envs, <TAB><TAB>current_env=env, <TAB>) "
"node.catalog_timestamp, <TAB><TAB><TAB><TAB>"form": compare, <TAB><TAB><TAB>} <TAB><TAB>)"
"<TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"certname": node.name, <TAB><TAB><TAB><TAB>"catalog_timestamp": node.catalog_timestamp, <MASK>"
"def catalogs_ajax(env, compare): <TAB>"""Server data to catalogs as JSON to Jquery datatables""" <TAB>draw = int(request.args.get("draw", 0)) <TAB>start = int(request.args.get("start", 0)) <TAB>length = int(request.args.get("length", app.config["NORMAL_TABLE_COUNT"])) <TAB>paging_args = {"limit": length, "offset": start} <TAB>search_arg = request.args.get("search[value]") <TAB>order_column = int(request.args.get("order[0][column]", 0)) <TAB>order_filter = CATALOGS_COLUMNS[order_column].get( <TAB><TAB>"filter", CATALOGS_COLUMNS[order_column]["attr"] <TAB>) <TAB>order_dir = request.args.get("order[0][dir]", "asc") <TAB>order_args = '[{"field": "%s", "order": "%s"}]' % (order_filter, order_dir) <TAB>envs = environments() <TAB>check_env(env, envs) <TAB>query = AndOperator() <TAB>if env != "*": <TAB><TAB>query.add(EqualsOperator("catalog_environment", env)) <TAB>if search_arg: <TAB><TAB>query.add(RegexOperator("certname", r"%s" % search_arg)) <TAB>query.add(NullOperator("catalog_timestamp", False)) <TAB>nodes = get_or_abort( <TAB><TAB>puppetdb.nodes, <TAB><TAB>query=query, <TAB><TAB>include_total=True, <TAB><TAB>order_by=order_args, <TAB><TAB>**paging_args <TAB>) <TAB>catalog_list = [] <TAB>total = None <TAB>for node in nodes: <TAB><TAB>if total is None: <TAB><TAB><TAB>total = puppetdb.total <TAB><TAB>catalog_list.append( <TAB><TAB><TAB>{ <TAB><TAB><TAB><TAB>"certname": node.name, <TAB><TAB><TAB><TAB>"catalog_timestamp": node.catalog_timestamp, <MASK> <TAB>if total is None: <TAB><TAB>total = 0 <TAB>return render_template( <TAB><TAB>"catalogs.json.tpl", <TAB><TAB>total=total, <TAB><TAB>total_filtered=total, <TAB><TAB>draw=draw, <TAB><TAB>columns=CATALOGS_COLUMNS, <TAB><TAB>catalogs=catalog_list, <TAB><TAB>envs=envs, <TAB><TAB>current_env=env, <TAB>) "
"<TAB><TAB><TAB><TAB>"form": compare, <TAB><TAB><TAB>} <TAB><TAB>)"
"def __init__(self, env, command, subsystem, exit_status, exit_signal, stdout, stderr): <TAB>self.env = env <TAB>self.command = command <TAB>self.subsystem = subsystem <TAB>self.exit_status = exit_status <TAB>self.exit_signal = exit_signal <TAB>self.stdout = stdout <TAB>self.stderr = stderr <TAB>if exit_signal: <TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal %s%s%s" % ( <TAB><TAB><TAB>signal, <TAB><TAB><TAB>": " + msg if msg else "", <TAB><TAB><TAB>" (core dumped)" if core_dumped else "", <TAB><TAB>) <TAB>else: <TAB><TAB>reason = "Process exited with non-zero exit status %s" % exit_status <TAB><TAB>lang = DEFAULT_LANG <TAB>super().__init__("Process", exit_status, reason, lang) "
"<TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal %s%s%s" % ( <TAB><TAB><TAB>signal, <TAB><TAB><TAB>": " + msg if msg else "", <TAB><TAB><TAB>" (core dumped)" if core_dumped else "", <TAB><TAB>)"
"<TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal<MASK>"
"def __init__(self, env, command, subsystem, exit_status, exit_signal, stdout, stderr): <TAB>self.env = env <TAB>self.command = command <TAB>self.subsystem = subsystem <TAB>self.exit_status = exit_status <TAB>self.exit_signal = exit_signal <TAB>self.stdout = stdout <TAB>self.stderr = stderr <TAB>if exit_signal: <TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal<MASK> <TAB>else: <TAB><TAB>reason = "Process exited with non-zero exit status %s" % exit_status <TAB><TAB>lang = DEFAULT_LANG <TAB>super().__init__("Process", exit_status, reason, lang) "
"%s%s%s" % ( <TAB><TAB><TAB>signal, <TAB><TAB><TAB>": " + msg if msg else "", <TAB><TAB><TAB>" (core dumped)" if core_dumped else "", <TAB><TAB>)"
"<TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal %s%s%s" % ( <TAB><TAB><TAB>signal, <TAB><TAB><TAB>": " + msg if msg else<MASK>"
"def __init__(self, env, command, subsystem, exit_status, exit_signal, stdout, stderr): <TAB>self.env = env <TAB>self.command = command <TAB>self.subsystem = subsystem <TAB>self.exit_status = exit_status <TAB>self.exit_signal = exit_signal <TAB>self.stdout = stdout <TAB>self.stderr = stderr <TAB>if exit_signal: <TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal %s%s%s" % ( <TAB><TAB><TAB>signal, <TAB><TAB><TAB>": " + msg if msg else<MASK> <TAB>else: <TAB><TAB>reason = "Process exited with non-zero exit status %s" % exit_status <TAB><TAB>lang = DEFAULT_LANG <TAB>super().__init__("Process", exit_status, reason, lang) "
""", <TAB><TAB><TAB>" (core dumped)" if core_dumped else "", <TAB><TAB>)"
"<TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal %s%s%s" % ( <TAB><TAB><TAB>signal, <TAB><TAB><TAB>": " + msg if msg else "", <TAB><TAB><TAB>" (core<MASK>"
"def __init__(self, env, command, subsystem, exit_status, exit_signal, stdout, stderr): <TAB>self.env = env <TAB>self.command = command <TAB>self.subsystem = subsystem <TAB>self.exit_status = exit_status <TAB>self.exit_signal = exit_signal <TAB>self.stdout = stdout <TAB>self.stderr = stderr <TAB>if exit_signal: <TAB><TAB>signal, core_dumped, msg, lang = exit_signal <TAB><TAB>reason = "Process exited with signal %s%s%s" % ( <TAB><TAB><TAB>signal, <TAB><TAB><TAB>": " + msg if msg else "", <TAB><TAB><TAB>" (core<MASK> <TAB>else: <TAB><TAB>reason = "Process exited with non-zero exit status %s" % exit_status <TAB><TAB>lang = DEFAULT_LANG <TAB>super().__init__("Process", exit_status, reason, lang) "
"dumped)" if core_dumped else "", <TAB><TAB>)"
"def test_end_to_end(self): <TAB># read from STDIN, a regular file, and a .gz <TAB>stdin = BytesIO(b"foo\nbar\n") <TAB>input_path = join(self.tmp_dir, "input") <TAB>with open(input_path, "w") as input_file: <TAB><TAB>input_file.write("bar\nqux\n") <TAB>input_gz_path = join(self.tmp_dir, "input.gz") <TAB>input_gz = gzip.GzipFile(input_gz_path, "wb") <TAB>input_gz.write(b"foo\n") <TAB>input_gz.close() <TAB>input_gz_glob = join(self.tmp_dir, "*.gz") <TAB>mr_job = MRTwoStepJob( <TAB><TAB>["-r", "local", "--num-cores", "4", "-", input_path, input_gz_glob] <TAB>) <TAB>mr_job.sandbox(stdin=stdin) <TAB>local_tmp_dir = None <TAB>results = [] <TAB>with mr_job.make_runner() as runner: <TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <TAB><TAB>local_tmp_dir = runner._get_local_tmp_dir() <TAB><TAB>assert exists(local_tmp_dir) <TAB><TAB>self.assertGreater(runner.counters()[0]["count"]["combiners"], 0) <TAB># make sure cleanup happens <TAB>assert not exists(local_tmp_dir) <TAB>self.assertEqual(sorted(results), [(1, "qux"), (2, "bar"), (2, "foo"), (5, None)]) "
"<TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <TAB><TAB>local_tmp_dir = runner._get_local_tmp_dir() <TAB><TAB>assert exists(local_tmp_dir) <TAB><TAB>self.assertGreater(runner.counters()[0]["count"]["combiners"], 0)"
"<TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <MASK>"
"def test_end_to_end(self): <TAB># read from STDIN, a regular file, and a .gz <TAB>stdin = BytesIO(b"foo\nbar\n") <TAB>input_path = join(self.tmp_dir, "input") <TAB>with open(input_path, "w") as input_file: <TAB><TAB>input_file.write("bar\nqux\n") <TAB>input_gz_path = join(self.tmp_dir, "input.gz") <TAB>input_gz = gzip.GzipFile(input_gz_path, "wb") <TAB>input_gz.write(b"foo\n") <TAB>input_gz.close() <TAB>input_gz_glob = join(self.tmp_dir, "*.gz") <TAB>mr_job = MRTwoStepJob( <TAB><TAB>["-r", "local", "--num-cores", "4", "-", input_path, input_gz_glob] <TAB>) <TAB>mr_job.sandbox(stdin=stdin) <TAB>local_tmp_dir = None <TAB>results = [] <TAB>with mr_job.make_runner() as runner: <TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <MASK> <TAB># make sure cleanup happens <TAB>assert not exists(local_tmp_dir) <TAB>self.assertEqual(sorted(results), [(1, "qux"), (2, "bar"), (2, "foo"), (5, None)]) "
"<TAB><TAB>local_tmp_dir = runner._get_local_tmp_dir() <TAB><TAB>assert exists(local_tmp_dir) <TAB><TAB>self.assertGreater(runner.counters()[0]["count"]["combiners"], 0)"
"<TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <TAB><TAB>local_tmp_dir = runner._get_local_tmp_dir() <TAB><TAB>assert exists(local_tmp_dir) <MASK>"
"def test_end_to_end(self): <TAB># read from STDIN, a regular file, and a .gz <TAB>stdin = BytesIO(b"foo\nbar\n") <TAB>input_path = join(self.tmp_dir, "input") <TAB>with open(input_path, "w") as input_file: <TAB><TAB>input_file.write("bar\nqux\n") <TAB>input_gz_path = join(self.tmp_dir, "input.gz") <TAB>input_gz = gzip.GzipFile(input_gz_path, "wb") <TAB>input_gz.write(b"foo\n") <TAB>input_gz.close() <TAB>input_gz_glob = join(self.tmp_dir, "*.gz") <TAB>mr_job = MRTwoStepJob( <TAB><TAB>["-r", "local", "--num-cores", "4", "-", input_path, input_gz_glob] <TAB>) <TAB>mr_job.sandbox(stdin=stdin) <TAB>local_tmp_dir = None <TAB>results = [] <TAB>with mr_job.make_runner() as runner: <TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <TAB><TAB>local_tmp_dir = runner._get_local_tmp_dir() <TAB><TAB>assert exists(local_tmp_dir) <MASK> <TAB># make sure cleanup happens <TAB>assert not exists(local_tmp_dir) <TAB>self.assertEqual(sorted(results), [(1, "qux"), (2, "bar"), (2, "foo"), (5, None)]) "
"<TAB><TAB>self.assertGreater(runner.counters()[0]["count"]["combiners"], 0)"
"<TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <TAB><TAB>local_tmp_dir = runner._get_local_tmp_dir() <TAB><TAB>assert exists(local_tmp_dir) <MASK>"
"def test_end_to_end(self): <TAB># read from STDIN, a regular file, and a .gz <TAB>stdin = BytesIO(b"foo\nbar\n") <TAB>input_path = join(self.tmp_dir, "input") <TAB>with open(input_path, "w") as input_file: <TAB><TAB>input_file.write("bar\nqux\n") <TAB>input_gz_path = join(self.tmp_dir, "input.gz") <TAB>input_gz = gzip.GzipFile(input_gz_path, "wb") <TAB>input_gz.write(b"foo\n") <TAB>input_gz.close() <TAB>input_gz_glob = join(self.tmp_dir, "*.gz") <TAB>mr_job = MRTwoStepJob( <TAB><TAB>["-r", "local", "--num-cores", "4", "-", input_path, input_gz_glob] <TAB>) <TAB>mr_job.sandbox(stdin=stdin) <TAB>local_tmp_dir = None <TAB>results = [] <TAB>with mr_job.make_runner() as runner: <TAB><TAB>assert isinstance(runner, LocalMRJobRunner) <TAB><TAB>runner.run() <TAB><TAB>results.extend(mr_job.parse_output(runner.cat_output())) <TAB><TAB>local_tmp_dir = runner._get_local_tmp_dir() <TAB><TAB>assert exists(local_tmp_dir) <MASK> <TAB># make sure cleanup happens <TAB>assert not exists(local_tmp_dir) <TAB>self.assertEqual(sorted(results), [(1, "qux"), (2, "bar"), (2, "foo"), (5, None)]) "
"<TAB><TAB>self.assertGreater(runner.counters()[0]["count"]["combiners"], 0)"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"]"
"<TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <MASK> <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"]"
"<TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <MASK> <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"]"
"<TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <MASK> <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"]"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"]"
"<TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <MASK> <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"]"
"<TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <MASK> <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"]"
"<TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <MASK> <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"]"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message"
"<TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <MASK> <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message"
"<TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <MASK> <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message"
"<TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <MASK> <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>) <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <MASK> <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <MASK> <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <MASK>"
"def set_hybrid_config(experiment_config, port, config_file_name): <TAB>"""set hybrid configuration""" <TAB>hybrid_config_data = dict() <TAB>hybrid_config_data["hybrid_config"] = experiment_config["hybridConfig"] <TAB>platform_list = experiment_config["hybridConfig"]["trainingServicePlatforms"] <TAB>for platform in platform_list: <TAB><TAB>if platform == "aml": <TAB><TAB><TAB>hybrid_config_data["aml_config"] = experiment_config["amlConfig"] <TAB><TAB>elif platform == "remote": <TAB><TAB><TAB>if experiment_config.get("remoteConfig"): <TAB><TAB><TAB><TAB>hybrid_config_data["remote_config"] = experiment_config["remoteConfig"] <TAB><TAB><TAB>hybrid_config_data["machine_list"] = experiment_config["machineList"] <TAB><TAB>elif platform == "local" and experiment_config.get("localConfig"): <TAB><TAB><TAB>hybrid_config_data["local_config"] = experiment_config["localConfig"] <TAB><TAB>elif platform == "pai": <TAB><TAB><TAB>hybrid_config_data["pai_config"] = experiment_config["paiConfig"] <TAB># It needs to connect all remote machines, set longer timeout here to wait for restful server connection response. <TAB>time_out = 60 if "remote" in platform_list else REST_TIME_OUT <TAB>response = rest_put( <TAB><TAB>cluster_metadata_url(port), json.dumps(hybrid_config_data), time_out <TAB>) <TAB>err_message = None <TAB>if not response or not response.status_code == 200: <TAB><TAB>if response is not None: <TAB><TAB><TAB>err_message = response.text <TAB><TAB><TAB>_, stderr_full_path = get_log_path(config_file_name) <TAB><TAB><TAB>with open(stderr_full_path, "a+") as fout: <TAB><TAB><TAB><TAB>fout.write( <TAB><TAB><TAB><TAB><TAB>json.dumps( <TAB><TAB><TAB><TAB><TAB><TAB>json.loads(err_message), <TAB><TAB><TAB><TAB><TAB><TAB>indent=4, <TAB><TAB><TAB><TAB><TAB><TAB>sort_keys=True, <TAB><TAB><TAB><TAB><TAB><TAB>separators=(",", ":"), <MASK> <TAB><TAB>return False, err_message <TAB>result, message = setNNIManagerIp(experiment_config, port, config_file_name) <TAB>if not result: <TAB><TAB>return result, message <TAB># set trial_config <TAB>return set_trial_config(experiment_config, port, config_file_name), err_message "
"<TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>)"
"def write_border(self): <TAB>for x in range(self.NCOLS): <TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD <TAB>for y in range(3, self.NROWS): <TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD"
"<MASK>"
"def write_border(self): <TAB>for x in range(self.NCOLS): <MASK> <TAB>for y in range(3, self.NROWS): <TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD"
"<TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <MASK>"
"def write_border(self): <TAB>for x in range(self.NCOLS): <TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <MASK> <TAB>for y in range(3, self.NROWS): <TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD"
"<TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <MASK>"
"def write_border(self): <TAB>for x in range(self.NCOLS): <TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <MASK> <TAB>for y in range(3, self.NROWS): <TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD"
"def write_border(self): <TAB>for x in range(self.NCOLS): <TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD <TAB>for y in range(3, self.NROWS): <TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA"
"<MASK>"
"def write_border(self): <TAB>for x in range(self.NCOLS): <TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD <TAB>for y in range(3, self.NROWS): <MASK> <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA"
"<TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <MASK>"
"def write_border(self): <TAB>for x in range(self.NCOLS): <TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD <TAB>for y in range(3, self.NROWS): <TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <MASK> <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA"
"<TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <MASK>"
"def write_border(self): <TAB>for x in range(self.NCOLS): <TAB><TAB>self.text_buffer[0][x] = 0xCD <TAB><TAB>self.text_buffer[2][x] = 0xCD <TAB><TAB>self.text_buffer[self.NROWS - 1][x] = 0xCD <TAB>for y in range(3, self.NROWS): <TAB><TAB>self.text_buffer[y][0] = 0xBA <TAB><TAB>self.text_buffer[y][9] = 0xB3 <MASK> <TAB>self.text_buffer[0][0] = 0xC9 <TAB>self.text_buffer[1][0] = 0xBA <TAB>self.text_buffer[0][self.NCOLS - 1] = 0xBB <TAB>self.text_buffer[1][self.NCOLS - 1] = 0xBA <TAB>self.text_buffer[2][0] = 0xCC <TAB>self.text_buffer[2][9] = 0xD1 <TAB>self.text_buffer[2][self.NCOLS - 1] = 0xB9 <TAB>self.text_buffer[self.NROWS - 1][0] = 0xC8 <TAB>self.text_buffer[self.NROWS - 1][9] = 0xCF <TAB>self.text_buffer[self.NROWS - 1][self.NCOLS - 1] = 0xBC "
"<TAB><TAB>self.text_buffer[y][self.NCOLS - 1] = 0xBA"
"def test_transaction_commit_low_level(database_url): <TAB>""" <TAB>Ensure that an explicit `await transaction.commit()` is supported. <TAB>""" <TAB>async with Database(database_url) as database: <TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>await transaction.commit() <TAB><TAB><TAB>query = notes.select() <TAB><TAB><TAB>results = await database.fetch_all(query=query) <TAB><TAB><TAB>assert len(results) == 1 "
"<TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>await transaction.commit() <TAB><TAB><TAB>query = notes.select() <TAB><TAB><TAB>results = await database.fetch_all(query=query) <TAB><TAB><TAB>assert len(results) == 1"
"<TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <MASK>"
"def test_transaction_commit_low_level(database_url): <TAB>""" <TAB>Ensure that an explicit `await transaction.commit()` is supported. <TAB>""" <TAB>async with Database(database_url) as database: <TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <MASK> "
"<TAB><TAB><TAB><TAB>await transaction.commit() <TAB><TAB><TAB>query = notes.select() <TAB><TAB><TAB>results = await database.fetch_all(query=query) <TAB><TAB><TAB>assert len(results) == 1"
"<TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>await transaction.commit() <TAB><TAB><TAB>query = notes.select() <TAB><TAB><TAB>results = await<MASK>"
"def test_transaction_commit_low_level(database_url): <TAB>""" <TAB>Ensure that an explicit `await transaction.commit()` is supported. <TAB>""" <TAB>async with Database(database_url) as database: <TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>await transaction.commit() <TAB><TAB><TAB>query = notes.select() <TAB><TAB><TAB>results = await<MASK> "
"database.fetch_all(query=query) <TAB><TAB><TAB>assert len(results) == 1"
"<TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>await transaction.commit() <TAB><TAB><TAB>query = notes.select() <TAB><TAB><TAB>results = await<MASK>"
"def test_transaction_commit_low_level(database_url): <TAB>""" <TAB>Ensure that an explicit `await transaction.commit()` is supported. <TAB>""" <TAB>async with Database(database_url) as database: <TAB><TAB>async with database.transaction(force_rollback=True): <TAB><TAB><TAB>transaction = await database.transaction() <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>query = notes.insert().values(text="example1", completed=True) <TAB><TAB><TAB><TAB>await database.execute(query) <TAB><TAB><TAB>except: # pragma: no cover <TAB><TAB><TAB><TAB>await transaction.rollback() <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>await transaction.commit() <TAB><TAB><TAB>query = notes.select() <TAB><TAB><TAB>results = await<MASK> "
"database.fetch_all(query=query) <TAB><TAB><TAB>assert len(results) == 1"
"def replace_masked_tokens(self, tokens, masked_pos): <TAB>if self.span_same_mask: <TAB><TAB>masked_pos = sorted(list(masked_pos)) <TAB>prev_pos, prev_rand = None, None <TAB>for pos in masked_pos: <TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <TAB><TAB>elif t_rand < 0.9: # 10% <TAB><TAB><TAB>tokens[pos] = get_random_word(self.vocab_words) <TAB><TAB>prev_pos, prev_rand = pos, t_rand "
"<TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <TAB><TAB>elif t_rand < 0.9: # 10% <TAB><TAB><TAB>tokens[pos] = get_random_word(self.vocab_words) <TAB><TAB>prev_pos, prev_rand = pos, t_rand"
"<TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <MASK>"
"def replace_masked_tokens(self, tokens, masked_pos): <TAB>if self.span_same_mask: <TAB><TAB>masked_pos = sorted(list(masked_pos)) <TAB>prev_pos, prev_rand = None, None <TAB>for pos in masked_pos: <TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <MASK> "
"<TAB><TAB>elif t_rand < 0.9: <TAB><TAB><TAB>tokens[pos] = get_random_word(self.vocab_words) <TAB><TAB>prev_pos, prev_rand = pos, t_rand"
"<TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <TAB><TAB>elif t_rand < 0.9: # 10% <TAB><TAB><TAB>tokens[pos] =<MASK>"
"def replace_masked_tokens(self, tokens, masked_pos): <TAB>if self.span_same_mask: <TAB><TAB>masked_pos = sorted(list(masked_pos)) <TAB>prev_pos, prev_rand = None, None <TAB>for pos in masked_pos: <TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <TAB><TAB>elif t_rand < 0.9: # 10% <TAB><TAB><TAB>tokens[pos] =<MASK> "
"get_random_word(self.vocab_words) <TAB><TAB>prev_pos, prev_rand = pos, t_rand"
"<TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <TAB><TAB>elif t_rand < 0.9: # 10% <TAB><TAB><TAB>tokens[pos] = get_random_word(self.vocab_words) <MASK>"
"def replace_masked_tokens(self, tokens, masked_pos): <TAB>if self.span_same_mask: <TAB><TAB>masked_pos = sorted(list(masked_pos)) <TAB>prev_pos, prev_rand = None, None <TAB>for pos in masked_pos: <TAB><TAB>if self.span_same_mask and (pos - 1 == prev_pos): <TAB><TAB><TAB>t_rand = prev_rand <TAB><TAB>else: <TAB><TAB><TAB>t_rand = rand() <TAB><TAB>if t_rand < 0.8: # 80% <TAB><TAB><TAB>tokens[pos] = "[MASK]" <TAB><TAB>elif t_rand < 0.9: # 10% <TAB><TAB><TAB>tokens[pos] = get_random_word(self.vocab_words) <MASK> "
"<TAB><TAB>prev_pos, prev_rand = pos, t_rand"
"def file_service_properties_async(self): <TAB># Instantiate the ShareServiceClient from a connection string <TAB>from azure.storage.fileshare.aio import ShareServiceClient <TAB>file_service = ShareServiceClient.from_connection_string(self.connection_string) <TAB># [START set_service_properties] <TAB># Create service properties <TAB>from azure.storage.fileshare import Metrics, CorsRule, RetentionPolicy <TAB># Create metrics for requests statistics <TAB>hour_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB>minute_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB># Create CORS rules <TAB>cors_rule1 = CorsRule(["www.xyz.com"], ["GET"]) <TAB>allowed_origins = ["www.xyz.com", "www.ab.com", "www.bc.com"] <TAB>allowed_methods = ["GET", "PUT"] <TAB>max_age_in_seconds = 500 <TAB>exposed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-source*", <TAB><TAB>"x-ms-meta-abc", <TAB><TAB>"x-ms-meta-bcd", <TAB>] <TAB>allowed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-target*", <TAB><TAB>"x-ms-meta-xyz", <TAB><TAB>"x-ms-meta-foo", <TAB>] <TAB>cors_rule2 = CorsRule( <TAB><TAB>allowed_origins, <TAB><TAB>allowed_methods, <TAB><TAB>max_age_in_seconds=max_age_in_seconds, <TAB><TAB>exposed_headers=exposed_headers, <TAB><TAB>allowed_headers=allowed_headers, <TAB>) <TAB>cors = [cors_rule1, cors_rule2] <TAB>async with file_service: <TAB><TAB># Set the service properties <TAB><TAB>await file_service.set_service_properties(hour_metrics, minute_metrics, cors) <TAB><TAB># [END set_service_properties] <TAB><TAB># [START get_service_properties] <TAB><TAB>properties = await file_service.get_service_properties() "
"<TAB><TAB>await file_service.set_service_properties(hour_metrics, minute_metrics, cors) <TAB><TAB># [END set_service_properties] <TAB><TAB># [START get_service_properties] <TAB><TAB>properties = await file_service.get_service_properties()"
"<TAB><TAB>await file_service.set_service_properties(hour_metrics,<MASK>"
"def file_service_properties_async(self): <TAB># Instantiate the ShareServiceClient from a connection string <TAB>from azure.storage.fileshare.aio import ShareServiceClient <TAB>file_service = ShareServiceClient.from_connection_string(self.connection_string) <TAB># [START set_service_properties] <TAB># Create service properties <TAB>from azure.storage.fileshare import Metrics, CorsRule, RetentionPolicy <TAB># Create metrics for requests statistics <TAB>hour_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB>minute_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB># Create CORS rules <TAB>cors_rule1 = CorsRule(["www.xyz.com"], ["GET"]) <TAB>allowed_origins = ["www.xyz.com", "www.ab.com", "www.bc.com"] <TAB>allowed_methods = ["GET", "PUT"] <TAB>max_age_in_seconds = 500 <TAB>exposed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-source*", <TAB><TAB>"x-ms-meta-abc", <TAB><TAB>"x-ms-meta-bcd", <TAB>] <TAB>allowed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-target*", <TAB><TAB>"x-ms-meta-xyz", <TAB><TAB>"x-ms-meta-foo", <TAB>] <TAB>cors_rule2 = CorsRule( <TAB><TAB>allowed_origins, <TAB><TAB>allowed_methods, <TAB><TAB>max_age_in_seconds=max_age_in_seconds, <TAB><TAB>exposed_headers=exposed_headers, <TAB><TAB>allowed_headers=allowed_headers, <TAB>) <TAB>cors = [cors_rule1, cors_rule2] <TAB>async with file_service: <TAB><TAB># Set the service properties <TAB><TAB>await file_service.set_service_properties(hour_metrics,<MASK> "
"minute_metrics, cors) <TAB><TAB># [END set_service_properties] <TAB><TAB># [START get_service_properties] <TAB><TAB>properties = await file_service.get_service_properties()"
"<TAB><TAB>await file_service.set_service_properties(hour_metrics, minute_metrics, cors) <TAB><TAB># [END set_service_properties] <TAB><TAB># [START<MASK>"
"def file_service_properties_async(self): <TAB># Instantiate the ShareServiceClient from a connection string <TAB>from azure.storage.fileshare.aio import ShareServiceClient <TAB>file_service = ShareServiceClient.from_connection_string(self.connection_string) <TAB># [START set_service_properties] <TAB># Create service properties <TAB>from azure.storage.fileshare import Metrics, CorsRule, RetentionPolicy <TAB># Create metrics for requests statistics <TAB>hour_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB>minute_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB># Create CORS rules <TAB>cors_rule1 = CorsRule(["www.xyz.com"], ["GET"]) <TAB>allowed_origins = ["www.xyz.com", "www.ab.com", "www.bc.com"] <TAB>allowed_methods = ["GET", "PUT"] <TAB>max_age_in_seconds = 500 <TAB>exposed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-source*", <TAB><TAB>"x-ms-meta-abc", <TAB><TAB>"x-ms-meta-bcd", <TAB>] <TAB>allowed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-target*", <TAB><TAB>"x-ms-meta-xyz", <TAB><TAB>"x-ms-meta-foo", <TAB>] <TAB>cors_rule2 = CorsRule( <TAB><TAB>allowed_origins, <TAB><TAB>allowed_methods, <TAB><TAB>max_age_in_seconds=max_age_in_seconds, <TAB><TAB>exposed_headers=exposed_headers, <TAB><TAB>allowed_headers=allowed_headers, <TAB>) <TAB>cors = [cors_rule1, cors_rule2] <TAB>async with file_service: <TAB><TAB># Set the service properties <TAB><TAB>await file_service.set_service_properties(hour_metrics, minute_metrics, cors) <TAB><TAB># [END set_service_properties] <TAB><TAB># [START<MASK> "
"get_service_properties] <TAB><TAB>properties = await file_service.get_service_properties()"
"<TAB><TAB>await file_service.set_service_properties(hour_metrics, minute_metrics, cors) <TAB><TAB># [END set_service_properties] <TAB><TAB># [START get_service_properties] <TAB><TAB>properties<MASK>"
"def file_service_properties_async(self): <TAB># Instantiate the ShareServiceClient from a connection string <TAB>from azure.storage.fileshare.aio import ShareServiceClient <TAB>file_service = ShareServiceClient.from_connection_string(self.connection_string) <TAB># [START set_service_properties] <TAB># Create service properties <TAB>from azure.storage.fileshare import Metrics, CorsRule, RetentionPolicy <TAB># Create metrics for requests statistics <TAB>hour_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB>minute_metrics = Metrics( <TAB><TAB>enabled=True, <TAB><TAB>include_apis=True, <TAB><TAB>retention_policy=RetentionPolicy(enabled=True, days=5), <TAB>) <TAB># Create CORS rules <TAB>cors_rule1 = CorsRule(["www.xyz.com"], ["GET"]) <TAB>allowed_origins = ["www.xyz.com", "www.ab.com", "www.bc.com"] <TAB>allowed_methods = ["GET", "PUT"] <TAB>max_age_in_seconds = 500 <TAB>exposed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-source*", <TAB><TAB>"x-ms-meta-abc", <TAB><TAB>"x-ms-meta-bcd", <TAB>] <TAB>allowed_headers = [ <TAB><TAB>"x-ms-meta-data*", <TAB><TAB>"x-ms-meta-target*", <TAB><TAB>"x-ms-meta-xyz", <TAB><TAB>"x-ms-meta-foo", <TAB>] <TAB>cors_rule2 = CorsRule( <TAB><TAB>allowed_origins, <TAB><TAB>allowed_methods, <TAB><TAB>max_age_in_seconds=max_age_in_seconds, <TAB><TAB>exposed_headers=exposed_headers, <TAB><TAB>allowed_headers=allowed_headers, <TAB>) <TAB>cors = [cors_rule1, cors_rule2] <TAB>async with file_service: <TAB><TAB># Set the service properties <TAB><TAB>await file_service.set_service_properties(hour_metrics, minute_metrics, cors) <TAB><TAB># [END set_service_properties] <TAB><TAB># [START get_service_properties] <TAB><TAB>properties<MASK> "
"= await file_service.get_service_properties()"
"def __init__(self, positions, end=None): <TAB>""" <TAB>Construct a Feature which may apply at C{positions}. <TAB>#For instance, importing some concrete subclasses (Feature is abstract) <TAB>>>> from nltk.tag.brill import Word, Pos <TAB>#Feature Word, applying at one of [-2, -1] <TAB>>>> Word([-2,-1]) <TAB>Word([-2, -1]) <TAB>#Positions need not be contiguous <TAB>>>> Word([-2,-1, 1]) <TAB>Word([-2, -1, 1]) <TAB>#Contiguous ranges can alternatively be specified giving the <TAB>#two endpoints (inclusive) <TAB>>>> Pos(-3, -1) <TAB>Pos([-3, -2, -1]) <TAB>#In two-arg form, start <= end is enforced <TAB>>>> Pos(2, 1) <TAB>Traceback (most recent call last): <TAB> File "<stdin>", line 1, in <module> <TAB> File "nltk/tbl/template.py", line 306, in __init__ <TAB><TAB>raise TypeError <TAB>ValueError: illegal interval specification: (start=2, end=1) <TAB>:type positions: list of int <TAB>:param positions: the positions at which this features should apply <TAB>:raises ValueError: illegal position specifications <TAB>An alternative calling convention, for contiguous positions only, <TAB>is Feature(start, end): <TAB>:type start: int <TAB>:param start: start of range where this feature should apply <TAB>:type end: int <TAB>:param end: end of range (NOTE: inclusive!) where this feature should apply <TAB>""" <TAB>self.positions = None # to avoid warnings <TAB>if end is None: <TAB><TAB>self.positions = tuple(sorted(set([int(i) for i in positions]))) <TAB>else: # positions was actually not a list, but only the start index <TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <TAB><TAB><TAB>raise ValueError( <TAB><TAB><TAB><TAB>"illegal interval specification: (start={0}, end={1})".format( <TAB><TAB><TAB><TAB><TAB>positions, end <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB># set property name given in subclass, or otherwise name of subclass <TAB>self.PROPERTY_NAME = self.__class__.PROPERTY_NAME or self.__class__.__name__ "
"<TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <TAB><TAB><TAB>raise ValueError( <TAB><TAB><TAB><TAB>"illegal interval specification: (start={0}, end={1})".format( <TAB><TAB><TAB><TAB><TAB>positions, end <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>)"
"<TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <MASK>"
"def __init__(self, positions, end=None): <TAB>""" <TAB>Construct a Feature which may apply at C{positions}. <TAB>#For instance, importing some concrete subclasses (Feature is abstract) <TAB>>>> from nltk.tag.brill import Word, Pos <TAB>#Feature Word, applying at one of [-2, -1] <TAB>>>> Word([-2,-1]) <TAB>Word([-2, -1]) <TAB>#Positions need not be contiguous <TAB>>>> Word([-2,-1, 1]) <TAB>Word([-2, -1, 1]) <TAB>#Contiguous ranges can alternatively be specified giving the <TAB>#two endpoints (inclusive) <TAB>>>> Pos(-3, -1) <TAB>Pos([-3, -2, -1]) <TAB>#In two-arg form, start <= end is enforced <TAB>>>> Pos(2, 1) <TAB>Traceback (most recent call last): <TAB> File "<stdin>", line 1, in <module> <TAB> File "nltk/tbl/template.py", line 306, in __init__ <TAB><TAB>raise TypeError <TAB>ValueError: illegal interval specification: (start=2, end=1) <TAB>:type positions: list of int <TAB>:param positions: the positions at which this features should apply <TAB>:raises ValueError: illegal position specifications <TAB>An alternative calling convention, for contiguous positions only, <TAB>is Feature(start, end): <TAB>:type start: int <TAB>:param start: start of range where this feature should apply <TAB>:type end: int <TAB>:param end: end of range (NOTE: inclusive!) where this feature should apply <TAB>""" <TAB>self.positions = None # to avoid warnings <TAB>if end is None: <TAB><TAB>self.positions = tuple(sorted(set([int(i) for i in positions]))) <TAB>else: # positions was actually not a list, but only the start index <TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <MASK> <TAB># set property name given in subclass, or otherwise name of subclass <TAB>self.PROPERTY_NAME = self.__class__.PROPERTY_NAME or self.__class__.__name__ "
"<TAB><TAB><TAB>raise ValueError( <TAB><TAB><TAB><TAB>"illegal interval specification: (start={0}, end={1})".format( <TAB><TAB><TAB><TAB><TAB>positions, end <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>)"
"<TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <TAB><TAB><TAB>raise ValueError( <TAB><TAB><TAB><TAB>"illegal interval specification: (start={0}, end={1})".format( <MASK>"
"def __init__(self, positions, end=None): <TAB>""" <TAB>Construct a Feature which may apply at C{positions}. <TAB>#For instance, importing some concrete subclasses (Feature is abstract) <TAB>>>> from nltk.tag.brill import Word, Pos <TAB>#Feature Word, applying at one of [-2, -1] <TAB>>>> Word([-2,-1]) <TAB>Word([-2, -1]) <TAB>#Positions need not be contiguous <TAB>>>> Word([-2,-1, 1]) <TAB>Word([-2, -1, 1]) <TAB>#Contiguous ranges can alternatively be specified giving the <TAB>#two endpoints (inclusive) <TAB>>>> Pos(-3, -1) <TAB>Pos([-3, -2, -1]) <TAB>#In two-arg form, start <= end is enforced <TAB>>>> Pos(2, 1) <TAB>Traceback (most recent call last): <TAB> File "<stdin>", line 1, in <module> <TAB> File "nltk/tbl/template.py", line 306, in __init__ <TAB><TAB>raise TypeError <TAB>ValueError: illegal interval specification: (start=2, end=1) <TAB>:type positions: list of int <TAB>:param positions: the positions at which this features should apply <TAB>:raises ValueError: illegal position specifications <TAB>An alternative calling convention, for contiguous positions only, <TAB>is Feature(start, end): <TAB>:type start: int <TAB>:param start: start of range where this feature should apply <TAB>:type end: int <TAB>:param end: end of range (NOTE: inclusive!) where this feature should apply <TAB>""" <TAB>self.positions = None # to avoid warnings <TAB>if end is None: <TAB><TAB>self.positions = tuple(sorted(set([int(i) for i in positions]))) <TAB>else: # positions was actually not a list, but only the start index <TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <TAB><TAB><TAB>raise ValueError( <TAB><TAB><TAB><TAB>"illegal interval specification: (start={0}, end={1})".format( <MASK> <TAB># set property name given in subclass, or otherwise name of subclass <TAB>self.PROPERTY_NAME = self.__class__.PROPERTY_NAME or self.__class__.__name__ "
"<TAB><TAB><TAB><TAB><TAB>positions, end <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>)"
"<TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <TAB><TAB><TAB>raise ValueError( <TAB><TAB><TAB><TAB>"illegal interval specification: (start={0}, end={1})".format( <TAB><TAB><TAB><TAB><TAB>positions,<MASK>"
"def __init__(self, positions, end=None): <TAB>""" <TAB>Construct a Feature which may apply at C{positions}. <TAB>#For instance, importing some concrete subclasses (Feature is abstract) <TAB>>>> from nltk.tag.brill import Word, Pos <TAB>#Feature Word, applying at one of [-2, -1] <TAB>>>> Word([-2,-1]) <TAB>Word([-2, -1]) <TAB>#Positions need not be contiguous <TAB>>>> Word([-2,-1, 1]) <TAB>Word([-2, -1, 1]) <TAB>#Contiguous ranges can alternatively be specified giving the <TAB>#two endpoints (inclusive) <TAB>>>> Pos(-3, -1) <TAB>Pos([-3, -2, -1]) <TAB>#In two-arg form, start <= end is enforced <TAB>>>> Pos(2, 1) <TAB>Traceback (most recent call last): <TAB> File "<stdin>", line 1, in <module> <TAB> File "nltk/tbl/template.py", line 306, in __init__ <TAB><TAB>raise TypeError <TAB>ValueError: illegal interval specification: (start=2, end=1) <TAB>:type positions: list of int <TAB>:param positions: the positions at which this features should apply <TAB>:raises ValueError: illegal position specifications <TAB>An alternative calling convention, for contiguous positions only, <TAB>is Feature(start, end): <TAB>:type start: int <TAB>:param start: start of range where this feature should apply <TAB>:type end: int <TAB>:param end: end of range (NOTE: inclusive!) where this feature should apply <TAB>""" <TAB>self.positions = None # to avoid warnings <TAB>if end is None: <TAB><TAB>self.positions = tuple(sorted(set([int(i) for i in positions]))) <TAB>else: # positions was actually not a list, but only the start index <TAB><TAB>try: <TAB><TAB><TAB>if positions > end: <TAB><TAB><TAB><TAB>raise TypeError <TAB><TAB><TAB>self.positions = tuple(range(positions, end + 1)) <TAB><TAB>except TypeError: <TAB><TAB><TAB># let any kind of erroneous spec raise ValueError <TAB><TAB><TAB>raise ValueError( <TAB><TAB><TAB><TAB>"illegal interval specification: (start={0}, end={1})".format( <TAB><TAB><TAB><TAB><TAB>positions,<MASK> <TAB># set property name given in subclass, or otherwise name of subclass <TAB>self.PROPERTY_NAME = self.__class__.PROPERTY_NAME or self.__class__.__name__ "
"end <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>)"
"def test_ensure_get_matrix_does_not_clone_if_format_agrees(self): <TAB>matrix = np.matrix([]) <TAB>for sparse_format in SPARSE_MATRIX_FORMATS: <TAB><TAB>created_matrix = matrix_creation_function_for_format(sparse_format)(matrix) <TAB><TAB>converted_matrix = get_matrix_in_format(created_matrix, sparse_format) <TAB><TAB>self.assertTrue(id(created_matrix) == id(converted_matrix)) "
"<TAB><TAB>created_matrix = matrix_creation_function_for_format(sparse_format)(matrix) <TAB><TAB>converted_matrix = get_matrix_in_format(created_matrix, sparse_format) <TAB><TAB>self.assertTrue(id(created_matrix) == id(converted_matrix))"
"<TAB><TAB>created_matrix =<MASK>"
"def test_ensure_get_matrix_does_not_clone_if_format_agrees(self): <TAB>matrix = np.matrix([]) <TAB>for sparse_format in SPARSE_MATRIX_FORMATS: <TAB><TAB>created_matrix =<MASK> "
"matrix_creation_function_for_format(sparse_format)(matrix) <TAB><TAB>converted_matrix = get_matrix_in_format(created_matrix, sparse_format) <TAB><TAB>self.assertTrue(id(created_matrix) == id(converted_matrix))"
"<TAB><TAB>created_matrix = matrix_creation_function_for_format(sparse_format)(matrix) <TAB><TAB>converted_matrix = get_matrix_in_format(created_matrix, sparse_format) <MASK>"
"def test_ensure_get_matrix_does_not_clone_if_format_agrees(self): <TAB>matrix = np.matrix([]) <TAB>for sparse_format in SPARSE_MATRIX_FORMATS: <TAB><TAB>created_matrix = matrix_creation_function_for_format(sparse_format)(matrix) <TAB><TAB>converted_matrix = get_matrix_in_format(created_matrix, sparse_format) <MASK> "
"<TAB><TAB>self.assertTrue(id(created_matrix) == id(converted_matrix))"
"<TAB><TAB>created_matrix = matrix_creation_function_for_format(sparse_format)(matrix) <TAB><TAB>converted_matrix = get_matrix_in_format(created_matrix, sparse_format) <MASK>"
"def test_ensure_get_matrix_does_not_clone_if_format_agrees(self): <TAB>matrix = np.matrix([]) <TAB>for sparse_format in SPARSE_MATRIX_FORMATS: <TAB><TAB>created_matrix = matrix_creation_function_for_format(sparse_format)(matrix) <TAB><TAB>converted_matrix = get_matrix_in_format(created_matrix, sparse_format) <MASK> "
"<TAB><TAB>self.assertTrue(id(created_matrix) == id(converted_matrix))"
"def ZwOpenFile(self, emu, argv, ctx={}): <TAB>""" <TAB>__kernel_entry NTSYSCALLAPI NTSTATUS NtOpenFile( <TAB> PHANDLE FileHandle, <TAB> ACCESS_MASK DesiredAccess, <TAB> POBJECT_ATTRIBUTES ObjectAttributes, <TAB> PIO_STATUS_BLOCK IoStatusBlock, <TAB> ULONG ShareAccess, <TAB> ULONG OpenOptions <TAB>); <TAB>""" <TAB>pHndl, access, objattr, statblock, share, open_opts = argv <TAB>nts = ddk.STATUS_OBJECT_NAME_NOT_FOUND <TAB>hfile = None <TAB>oa = self.win.OBJECT_ATTRIBUTES(emu.get_ptr_size()) <TAB>oa = self.mem_cast(oa, objattr) <TAB>path = self.read_unicode_string(oa.ObjectName) <TAB>argv[3] = path <TAB>ad = ddk.get_file_access_defines(access) <TAB>if ad: <TAB><TAB>argv[1] = " | ".join(ad) <TAB>obj = self.get_object_from_name(path) <TAB>if obj: <TAB><TAB>nts = ddk.STATUS_SUCCESS <TAB><TAB>hfile = self.get_object_handle(obj) <TAB>else: <TAB><TAB># Is a file being opened? <TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <TAB><TAB>npath = npath.rstrip("\\") <TAB><TAB>hfile = emu.file_open(npath) <TAB><TAB>if hfile: <TAB><TAB><TAB>nts = ddk.STATUS_SUCCESS <TAB>self.log_file_access(path, "open", disposition=None, access=ad) <TAB>if hfile: <TAB><TAB>self.mem_write(pHndl, hfile.to_bytes(self.ptr_size, "little")) <TAB>return nts "
"<TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <TAB><TAB>npath = npath.rstrip("\\") <TAB><TAB>hfile = emu.file_open(npath) <TAB><TAB>if hfile: <TAB><TAB><TAB>nts = ddk.STATUS_SUCCESS"
"<TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <MASK>"
"def ZwOpenFile(self, emu, argv, ctx={}): <TAB>""" <TAB>__kernel_entry NTSYSCALLAPI NTSTATUS NtOpenFile( <TAB> PHANDLE FileHandle, <TAB> ACCESS_MASK DesiredAccess, <TAB> POBJECT_ATTRIBUTES ObjectAttributes, <TAB> PIO_STATUS_BLOCK IoStatusBlock, <TAB> ULONG ShareAccess, <TAB> ULONG OpenOptions <TAB>); <TAB>""" <TAB>pHndl, access, objattr, statblock, share, open_opts = argv <TAB>nts = ddk.STATUS_OBJECT_NAME_NOT_FOUND <TAB>hfile = None <TAB>oa = self.win.OBJECT_ATTRIBUTES(emu.get_ptr_size()) <TAB>oa = self.mem_cast(oa, objattr) <TAB>path = self.read_unicode_string(oa.ObjectName) <TAB>argv[3] = path <TAB>ad = ddk.get_file_access_defines(access) <TAB>if ad: <TAB><TAB>argv[1] = " | ".join(ad) <TAB>obj = self.get_object_from_name(path) <TAB>if obj: <TAB><TAB>nts = ddk.STATUS_SUCCESS <TAB><TAB>hfile = self.get_object_handle(obj) <TAB>else: <TAB><TAB># Is a file being opened? <TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <MASK> <TAB>self.log_file_access(path, "open", disposition=None, access=ad) <TAB>if hfile: <TAB><TAB>self.mem_write(pHndl, hfile.to_bytes(self.ptr_size, "little")) <TAB>return nts "
"<TAB><TAB>npath = npath.rstrip("\\") <TAB><TAB>hfile = emu.file_open(npath) <TAB><TAB>if hfile: <TAB><TAB><TAB>nts = ddk.STATUS_SUCCESS"
"<TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <TAB><TAB>npath = npath.rstrip("\\") <TAB><TAB>hfile = emu.file_open(npath) <MASK>"
"def ZwOpenFile(self, emu, argv, ctx={}): <TAB>""" <TAB>__kernel_entry NTSYSCALLAPI NTSTATUS NtOpenFile( <TAB> PHANDLE FileHandle, <TAB> ACCESS_MASK DesiredAccess, <TAB> POBJECT_ATTRIBUTES ObjectAttributes, <TAB> PIO_STATUS_BLOCK IoStatusBlock, <TAB> ULONG ShareAccess, <TAB> ULONG OpenOptions <TAB>); <TAB>""" <TAB>pHndl, access, objattr, statblock, share, open_opts = argv <TAB>nts = ddk.STATUS_OBJECT_NAME_NOT_FOUND <TAB>hfile = None <TAB>oa = self.win.OBJECT_ATTRIBUTES(emu.get_ptr_size()) <TAB>oa = self.mem_cast(oa, objattr) <TAB>path = self.read_unicode_string(oa.ObjectName) <TAB>argv[3] = path <TAB>ad = ddk.get_file_access_defines(access) <TAB>if ad: <TAB><TAB>argv[1] = " | ".join(ad) <TAB>obj = self.get_object_from_name(path) <TAB>if obj: <TAB><TAB>nts = ddk.STATUS_SUCCESS <TAB><TAB>hfile = self.get_object_handle(obj) <TAB>else: <TAB><TAB># Is a file being opened? <TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <TAB><TAB>npath = npath.rstrip("\\") <TAB><TAB>hfile = emu.file_open(npath) <MASK> <TAB>self.log_file_access(path, "open", disposition=None, access=ad) <TAB>if hfile: <TAB><TAB>self.mem_write(pHndl, hfile.to_bytes(self.ptr_size, "little")) <TAB>return nts "
"<TAB><TAB>if hfile: <TAB><TAB><TAB>nts = ddk.STATUS_SUCCESS"
"<TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <TAB><TAB>npath = npath.rstrip("\\") <TAB><TAB>hfile = emu.file_open(npath) <TAB><TAB>if hfile: <MASK>"
"def ZwOpenFile(self, emu, argv, ctx={}): <TAB>""" <TAB>__kernel_entry NTSYSCALLAPI NTSTATUS NtOpenFile( <TAB> PHANDLE FileHandle, <TAB> ACCESS_MASK DesiredAccess, <TAB> POBJECT_ATTRIBUTES ObjectAttributes, <TAB> PIO_STATUS_BLOCK IoStatusBlock, <TAB> ULONG ShareAccess, <TAB> ULONG OpenOptions <TAB>); <TAB>""" <TAB>pHndl, access, objattr, statblock, share, open_opts = argv <TAB>nts = ddk.STATUS_OBJECT_NAME_NOT_FOUND <TAB>hfile = None <TAB>oa = self.win.OBJECT_ATTRIBUTES(emu.get_ptr_size()) <TAB>oa = self.mem_cast(oa, objattr) <TAB>path = self.read_unicode_string(oa.ObjectName) <TAB>argv[3] = path <TAB>ad = ddk.get_file_access_defines(access) <TAB>if ad: <TAB><TAB>argv[1] = " | ".join(ad) <TAB>obj = self.get_object_from_name(path) <TAB>if obj: <TAB><TAB>nts = ddk.STATUS_SUCCESS <TAB><TAB>hfile = self.get_object_handle(obj) <TAB>else: <TAB><TAB># Is a file being opened? <TAB><TAB>npath = path <TAB><TAB>if path.startswith("\\??\\"): <TAB><TAB><TAB>npath = path.strip("\\??\\") <TAB><TAB>npath = npath.rstrip("\\") <TAB><TAB>hfile = emu.file_open(npath) <TAB><TAB>if hfile: <MASK> <TAB>self.log_file_access(path, "open", disposition=None, access=ad) <TAB>if hfile: <TAB><TAB>self.mem_write(pHndl, hfile.to_bytes(self.ptr_size, "little")) <TAB>return nts "
"<TAB><TAB><TAB>nts = ddk.STATUS_SUCCESS"
"def prepare_request(next_link=None): <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>if not next_link: <TAB><TAB># Construct URL <TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB><TAB>"api_version", api_version, "str" <TAB><TAB>) <TAB><TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>else: <TAB><TAB>url = next_link <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>return request "
"<TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB><TAB>"api_version", api_version, "str" <TAB><TAB>) <TAB><TAB>request = self._client.get(url, query_parameters, header_parameters)"
"<TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] =<MASK>"
"def prepare_request(next_link=None): <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>if not next_link: <TAB><TAB># Construct URL <TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] =<MASK> <TAB>else: <TAB><TAB>url = next_link <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>return request "
"self._serialize.query( <TAB><TAB><TAB>"api_version", api_version, "str" <TAB><TAB>) <TAB><TAB>request = self._client.get(url, query_parameters, header_parameters)"
"<TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB><TAB>"api_version", api_version, "str" <TAB><TAB>) <MASK>"
"def prepare_request(next_link=None): <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>if not next_link: <TAB><TAB># Construct URL <TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB><TAB>"api_version", api_version, "str" <TAB><TAB>) <MASK> <TAB>else: <TAB><TAB>url = next_link <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>return request "
"<TAB><TAB>request = self._client.get(url, query_parameters, header_parameters)"
"<TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB><TAB>"api_version", api_version, "str" <TAB><TAB>) <TAB><TAB>request =<MASK>"
"def prepare_request(next_link=None): <TAB># Construct headers <TAB>header_parameters = {} # type: Dict[str, Any] <TAB>header_parameters["Accept"] = self._serialize.header("accept", accept, "str") <TAB>if not next_link: <TAB><TAB># Construct URL <TAB><TAB>url = self.list.metadata["url"] # type: ignore <TAB><TAB>path_format_arguments = { <TAB><TAB><TAB>"resourceGroupName": self._serialize.url( <TAB><TAB><TAB><TAB>"resource_group_name", resource_group_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"vmScaleSetName": self._serialize.url( <TAB><TAB><TAB><TAB>"vm_scale_set_name", vm_scale_set_name, "str" <TAB><TAB><TAB>), <TAB><TAB><TAB>"instanceId": self._serialize.url("instance_id", instance_id, "str"), <TAB><TAB><TAB>"subscriptionId": self._serialize.url( <TAB><TAB><TAB><TAB>"self._config.subscription_id", self._config.subscription_id, "str" <TAB><TAB><TAB>), <TAB><TAB>} <TAB><TAB>url = self._client.format_url(url, **path_format_arguments) <TAB><TAB># Construct parameters <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>if expand is not None: <TAB><TAB><TAB>query_parameters["$expand"] = self._serialize.query("expand", expand, "str") <TAB><TAB>query_parameters["api-version"] = self._serialize.query( <TAB><TAB><TAB>"api_version", api_version, "str" <TAB><TAB>) <TAB><TAB>request =<MASK> <TAB>else: <TAB><TAB>url = next_link <TAB><TAB>query_parameters = {} # type: Dict[str, Any] <TAB><TAB>request = self._client.get(url, query_parameters, header_parameters) <TAB>return request "
"self._client.get(url, query_parameters, header_parameters)"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_" + i) for i in list(required_perms)]) <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"<TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_" + i) for i in list(required_perms)])"
"<TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return<MASK>"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return<MASK> <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>return all([_perms.check("o_" + i) for i in list(required_perms)])"
"<TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return<MASK>"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return<MASK> <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"all([_perms.check("o_" + i) for i in list(required_perms)])"
"<TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_"<MASK>"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_"<MASK> <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"+ i) for i in list(required_perms)])"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w" <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_" + i) for i in list(required_perms)]) <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"<TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w""
"<TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <MASK>"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <MASK> <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_" + i) for i in list(required_perms)]) <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"<TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w""
"<TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif<MASK>"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif<MASK> <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_" + i) for i in list(required_perms)]) <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"required_perms == W_OK: <TAB><TAB><TAB><TAB>required_perms = "w""
"<TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <MASK>"
"def access( <TAB>self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None ): <TAB>""" <TAB>Returns bool w.r.t the a user/group has permissions to read/write/execute a file. <TAB>This is a wrapper around os.access. But it would accept name or id instead of of just ids. <TAB>Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc. <TAB>*Implementation Note*: First we would check whether the current user has the required permissions. If not, <TAB>then we check the group to which this user belongs to. Finally if the user's group also does not meet the <TAB>perms we check for other permissions. <TAB>""" <TAB>try: <TAB><TAB>_path = self.norm_path(path) <TAB><TAB>_perms = self.getinfo(_path, namespaces=["access"]).permissions <TAB><TAB>_uid = self.getinfo(_path, namespaces=["access"]).uid <TAB><TAB>_gid = self.getinfo(_path, namespaces=["access"]).gid <TAB><TAB>if isinstance(required_perms, int): <TAB><TAB><TAB>if required_perms == F_OK: <TAB><TAB><TAB><TAB>return True <TAB><TAB><TAB>elif required_perms == R_OK: <TAB><TAB><TAB><TAB>required_perms = "r" <TAB><TAB><TAB>elif required_perms == W_OK: <MASK> <TAB><TAB># first we need to find the uid - in case username is provided instead of uid. <TAB><TAB>if isinstance(name_or_id, str): <TAB><TAB><TAB># must be username or group name <TAB><TAB><TAB># fetch the uid/gid of that uname/gname <TAB><TAB><TAB>[_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}] <TAB><TAB>else: <TAB><TAB><TAB>_id = name_or_id <TAB><TAB># find the gid of this user. <TAB><TAB>_grp_id = None <TAB><TAB># FIXME: The above operation can cause incorrect results if one user belongs to more than one group. <TAB><TAB>for key, values in self._user_grps.items(): <TAB><TAB><TAB>if _id in values: <TAB><TAB><TAB><TAB>_grp_id = key <TAB><TAB>if _id is not None: <TAB><TAB><TAB>if _id == _uid: <TAB><TAB><TAB><TAB># provided id is the owner <TAB><TAB><TAB><TAB>return all([_perms.check("u_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>elif _grp_id and (_grp_id == _gid): <TAB><TAB><TAB><TAB># provided id is not the owner but belongs to that grp. <TAB><TAB><TAB><TAB># That means we would check it's group permissions. <TAB><TAB><TAB><TAB>return all([_perms.check("g_" + i) for i in list(required_perms)]) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB># id not equal to either in uid/gid <TAB><TAB><TAB><TAB># check other permissions <TAB><TAB><TAB><TAB>return all([_perms.check("o_" + i) for i in list(required_perms)]) <TAB>except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err: <TAB><TAB>logger.info("Exception has occurred while doing fs.access: {}".format(err)) <TAB><TAB>logger.info("Returning False to avoid conpot crash") <TAB><TAB>return False "
"<TAB><TAB><TAB><TAB>required_perms = "w""
"def _set_fortune(self, state=None, new=False): <TAB>if not self.fortune_command: <TAB><TAB>return <TAB>if new: <TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command = None <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = " ".join(fortune_data.split()) <TAB><TAB><TAB>self.time = time() + self.fortune_timeout <TAB>elif state is None: <TAB><TAB>if self.toggled and time() >= self.time: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB>else: <TAB><TAB>self.toggled = state <TAB><TAB>if state: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = None "
"<TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command = None <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = " ".join(fortune_data.split()) <TAB><TAB><TAB>self.time = time() + self.fortune_timeout"
"<TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command =<MASK>"
"def _set_fortune(self, state=None, new=False): <TAB>if not self.fortune_command: <TAB><TAB>return <TAB>if new: <TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command =<MASK> <TAB>elif state is None: <TAB><TAB>if self.toggled and time() >= self.time: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB>else: <TAB><TAB>self.toggled = state <TAB><TAB>if state: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = None "
"None <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = " ".join(fortune_data.split()) <TAB><TAB><TAB>self.time = time() + self.fortune_timeout"
"<TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command = None <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = "<MASK>"
"def _set_fortune(self, state=None, new=False): <TAB>if not self.fortune_command: <TAB><TAB>return <TAB>if new: <TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command = None <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = "<MASK> <TAB>elif state is None: <TAB><TAB>if self.toggled and time() >= self.time: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB>else: <TAB><TAB>self.toggled = state <TAB><TAB>if state: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = None "
"".join(fortune_data.split()) <TAB><TAB><TAB>self.time = time() + self.fortune_timeout"
"<TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command = None <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = " ".join(fortune_data.split()) <MASK>"
"def _set_fortune(self, state=None, new=False): <TAB>if not self.fortune_command: <TAB><TAB>return <TAB>if new: <TAB><TAB>try: <TAB><TAB><TAB>fortune_data = self.py3.command_output(self.fortune_command) <TAB><TAB>except self.py3.CommandError: <TAB><TAB><TAB>self.fortune = "" <TAB><TAB><TAB>self.fortune_command = None <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = " ".join(fortune_data.split()) <MASK> <TAB>elif state is None: <TAB><TAB>if self.toggled and time() >= self.time: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB>else: <TAB><TAB>self.toggled = state <TAB><TAB>if state: <TAB><TAB><TAB>self._set_fortune(new=True) <TAB><TAB>else: <TAB><TAB><TAB>self.fortune = None "
"<TAB><TAB><TAB>self.time = time() + self.fortune_timeout"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <MASK> <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill<MASK> <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling<MASK> <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>)"
"<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <MASK> <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <MASK> <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are<MASK> <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>)"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <MASK> <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <MASK> <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"<TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not<MASK> <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>)"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>)"
"<TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <MASK> <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>)"
"<TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <MASK> <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>)"
"<TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway<MASK> <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"{ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>)"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}")"
"<TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <MASK> <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}")"
"<TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <MASK> <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}")"
"<TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <MASK> <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}")"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>) <TAB>return verrors "
"<TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>)"
"<TAB><TAB>if<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if<MASK> <TAB>return verrors "
"len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>)"
"<TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <MASK> <TAB>return verrors "
"<TAB><TAB><TAB><TAB>f"{schema}.domains", "No more than 5 additional domains are allowed" <TAB><TAB><TAB>)"
"<TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more<MASK>"
"def validate_general_settings(self, data, schema): <TAB>verrors = ValidationErrors() <TAB>for key in [key for key in data.keys() if "nameserver" in key]: <TAB><TAB>nameserver_value = data.get(key) <TAB><TAB>if nameserver_value: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>nameserver_ip = ipaddress.ip_address(nameserver_value) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", str(e)) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>if nameserver_ip.is_loopback: <TAB><TAB><TAB><TAB><TAB>verrors.add(f"{schema}.{key}", "Loopback is not a valid nameserver") <TAB><TAB><TAB><TAB>elif nameserver_ip.is_unspecified: <TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB>"Unspecified addresses are not valid as nameservers", <TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>elif nameserver_ip.version == 4: <TAB><TAB><TAB><TAB><TAB>if nameserver_value == "255.255.255.255": <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", "This is not a valid nameserver address" <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB><TAB>elif nameserver_value.startswith("169.254"): <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>"169.254/16 subnet is not valid for nameserver", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB>nameserver_number = int(key[-1]) <TAB><TAB><TAB><TAB>for i in range(nameserver_number - 1, 0, -1): <TAB><TAB><TAB><TAB><TAB>if f"nameserver{i}" in data.keys() and not data[f"nameserver{i}"]: <TAB><TAB><TAB><TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"{schema}.{key}", <TAB><TAB><TAB><TAB><TAB><TAB><TAB>f"Must fill out namserver{i} before filling out {key}", <TAB><TAB><TAB><TAB><TAB><TAB>) <TAB>ipv4_gateway_value = data.get("ipv4gateway") <TAB>if ipv4_gateway_value: <TAB><TAB>if not await self.middleware.call( <TAB><TAB><TAB>"route.ipv4gw_reachable", ipaddress.ip_address(ipv4_gateway_value).exploded <TAB><TAB>): <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.ipv4gateway", f"Gateway {ipv4_gateway_value} is unreachable" <TAB><TAB><TAB>) <TAB>netwait_ip = data.get("netwait_ip") <TAB>if netwait_ip: <TAB><TAB>for ip in netwait_ip: <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>ipaddress.ip_address(ip) <TAB><TAB><TAB>except ValueError as e: <TAB><TAB><TAB><TAB>verrors.add(f"{schema}.netwait_ip", f"{e.__str__()}") <TAB>if data.get("domains"): <TAB><TAB>if len(data.get("domains")) > 5: <TAB><TAB><TAB>verrors.add( <TAB><TAB><TAB><TAB>f"{schema}.domains", "No more<MASK> <TAB>return verrors "
"than 5 additional domains are allowed" <TAB><TAB><TAB>)"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"<TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index)"
"<TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <MASK> <TAB>return result "
"<TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index)"
"<TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <MASK> <TAB>return result "
"<TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index)"
"<TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <MASK> <TAB>return result "
"<TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index)"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"<TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err)"
"<TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <MASK> <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"<TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err)"
"<TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError)<MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError)<MASK> <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err)"
"<TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <MASK> <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"<TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err)"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result] <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"<TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result]"
"<TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <MASK> <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"<TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return [get_value_by_dot(subdoc, remaining_key) for subdoc in result]"
"<TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return<MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return<MASK> <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"[get_value_by_dot(subdoc, remaining_key) for subdoc in result]"
"<TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return<MASK>"
"def get_value_by_dot(doc, key, can_generate_array=False): <TAB>"""Get dictionary value using dotted key""" <TAB>result = doc <TAB>key_items = key.split(".") <TAB>for key_index, key_item in enumerate(key_items): <TAB><TAB>if isinstance(result, dict): <TAB><TAB><TAB>result = result[key_item] <TAB><TAB>elif isinstance(result, (list, tuple)): <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>int_key = int(key_item) <TAB><TAB><TAB>except ValueError as err: <TAB><TAB><TAB><TAB>if not can_generate_array: <TAB><TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB><TAB><TAB>remaining_key = ".".join(key_items[key_index:]) <TAB><TAB><TAB><TAB>return<MASK> <TAB><TAB><TAB>try: <TAB><TAB><TAB><TAB>result = result[int_key] <TAB><TAB><TAB>except (ValueError, IndexError) as err: <TAB><TAB><TAB><TAB>raise_from(KeyError(key_index), err) <TAB><TAB>else: <TAB><TAB><TAB>raise KeyError(key_index) <TAB>return result "
"[get_value_by_dot(subdoc, remaining_key) for subdoc in result]"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"]) <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager <TAB>finally: <TAB><TAB>os.unlink(filename) "
"<TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"])"
"<TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb",<MASK>"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb",<MASK> <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager <TAB>finally: <TAB><TAB>os.unlink(filename) "
""emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"])"
"<TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <MASK>"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <MASK> <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager <TAB>finally: <TAB><TAB>os.unlink(filename) "
"<TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"])"
"<TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda<MASK>"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda<MASK> <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager <TAB>finally: <TAB><TAB>os.unlink(filename) "
"text: pipepager(text, os.environ["PAGER"])"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"]) <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager <TAB>finally: <TAB><TAB>os.unlink(filename) "
"<TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager"
"<TAB><TAB>if hasattr(os,<MASK>"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"]) <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os,<MASK> <TAB>finally: <TAB><TAB>os.unlink(filename) "
""system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager"
"<TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text:<MASK>"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"]) <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text:<MASK> <TAB>finally: <TAB><TAB>os.unlink(filename) "
"pipepager(text, "more") <TAB><TAB>else: <TAB><TAB><TAB>return ttypager"
"<TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <MASK>"
"def getpager(): <TAB>"""Decide what method to use for paging through text.""" <TAB>if type(sys.stdout) is not types.FileType: <TAB><TAB>return plainpager <TAB>if not sys.stdin.isatty() or not sys.stdout.isatty(): <TAB><TAB>return plainpager <TAB>if "PAGER" in os.environ: <TAB><TAB>if sys.platform == "win32": # pipes completely broken in Windows <TAB><TAB><TAB>return lambda text: tempfilepager(plain(text), os.environ["PAGER"]) <TAB><TAB>elif os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB><TAB>return lambda text: pipepager(plain(text), os.environ["PAGER"]) <TAB><TAB>else: <TAB><TAB><TAB>return lambda text: pipepager(text, os.environ["PAGER"]) <TAB>if os.environ.get("TERM") in ("dumb", "emacs"): <TAB><TAB>return plainpager <TAB>if sys.platform == "win32" or sys.platform.startswith("os2"): <TAB><TAB>return lambda text: tempfilepager(plain(text), "more <") <TAB>if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: <TAB><TAB>return lambda text: pipepager(text, "less") <TAB>import tempfile <TAB>(fd, filename) = tempfile.mkstemp() <TAB>os.close(fd) <TAB>try: <TAB><TAB>if hasattr(os, "system") and os.system('more "%s"' % filename) == 0: <TAB><TAB><TAB>return lambda text: pipepager(text, "more") <MASK> <TAB>finally: <TAB><TAB>os.unlink(filename) "
"<TAB><TAB>else: <TAB><TAB><TAB>return ttypager"
"def test_CMOVB_15_symbolic(self): <TAB>"""Instruction CMOVB_15 <TAB>Groups: cmov <TAB>0xf7ff3e81: cmovb eax, ecx <TAB>""" <TAB>cs = ConstraintSet() <TAB>mem = SMemory32(cs) <TAB>cpu = I386Cpu(mem) <TAB>mem.mmap(0xF7FF3000, 0x1000, "rwx") <TAB>mem.write(0xF7FF3E81, "\x0fB\xc1") <TAB>cpu.EIP = 0xF7FF3E81 <TAB>cpu.EAX = cs.new_bitvec(32) <TAB>cs.add(cpu.EAX == 0x1) <TAB>cpu.CF = cs.new_bool() <TAB>cs.add(cpu.CF == False) <TAB>cpu.ECX = cs.new_bitvec(32) <TAB>cs.add(cpu.ECX == 0xFFFFFFFF) <TAB>done = False <TAB>while not done: <TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <TAB><TAB><TAB>values = solver.get_all_values(cs, symbol) <TAB><TAB><TAB>self.assertEqual(len(values), 1) <TAB><TAB><TAB>setattr(cpu, e.reg_name, values[0]) <TAB>condition = True <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E81, 8) == ord("\x0f")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E82, 8) == ord("B")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E83, 8) == ord("\xc1")) <TAB>condition = Operators.AND(condition, cpu.EIP == 0xF7FF3E84) <TAB>condition = Operators.AND(condition, cpu.ECX == 0xFFFFFFFF) <TAB>condition = Operators.AND(condition, cpu.EAX == 0x1) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition) <TAB><TAB>self.assertTrue(solver.check(temp_cs)) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition == False) <TAB><TAB>self.assertFalse(solver.check(temp_cs)) "
"<TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <TAB><TAB><TAB>values = solver.get_all_values(cs, symbol) <TAB><TAB><TAB>self.assertEqual(len(values), 1) <TAB><TAB><TAB>setattr(cpu, e.reg_name, values[0])"
"<TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <MASK>"
"def test_CMOVB_15_symbolic(self): <TAB>"""Instruction CMOVB_15 <TAB>Groups: cmov <TAB>0xf7ff3e81: cmovb eax, ecx <TAB>""" <TAB>cs = ConstraintSet() <TAB>mem = SMemory32(cs) <TAB>cpu = I386Cpu(mem) <TAB>mem.mmap(0xF7FF3000, 0x1000, "rwx") <TAB>mem.write(0xF7FF3E81, "\x0fB\xc1") <TAB>cpu.EIP = 0xF7FF3E81 <TAB>cpu.EAX = cs.new_bitvec(32) <TAB>cs.add(cpu.EAX == 0x1) <TAB>cpu.CF = cs.new_bool() <TAB>cs.add(cpu.CF == False) <TAB>cpu.ECX = cs.new_bitvec(32) <TAB>cs.add(cpu.ECX == 0xFFFFFFFF) <TAB>done = False <TAB>while not done: <TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <MASK> <TAB>condition = True <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E81, 8) == ord("\x0f")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E82, 8) == ord("B")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E83, 8) == ord("\xc1")) <TAB>condition = Operators.AND(condition, cpu.EIP == 0xF7FF3E84) <TAB>condition = Operators.AND(condition, cpu.ECX == 0xFFFFFFFF) <TAB>condition = Operators.AND(condition, cpu.EAX == 0x1) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition) <TAB><TAB>self.assertTrue(solver.check(temp_cs)) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition == False) <TAB><TAB>self.assertFalse(solver.check(temp_cs)) "
"<TAB><TAB><TAB>values = solver.get_all_values(cs, symbol) <TAB><TAB><TAB>self.assertEqual(len(values), 1) <TAB><TAB><TAB>setattr(cpu, e.reg_name, values[0])"
"<TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <TAB><TAB><TAB>values = solver.get_all_values(cs, symbol) <TAB><TAB><TAB>self.assertEqual(len(values),<MASK>"
"def test_CMOVB_15_symbolic(self): <TAB>"""Instruction CMOVB_15 <TAB>Groups: cmov <TAB>0xf7ff3e81: cmovb eax, ecx <TAB>""" <TAB>cs = ConstraintSet() <TAB>mem = SMemory32(cs) <TAB>cpu = I386Cpu(mem) <TAB>mem.mmap(0xF7FF3000, 0x1000, "rwx") <TAB>mem.write(0xF7FF3E81, "\x0fB\xc1") <TAB>cpu.EIP = 0xF7FF3E81 <TAB>cpu.EAX = cs.new_bitvec(32) <TAB>cs.add(cpu.EAX == 0x1) <TAB>cpu.CF = cs.new_bool() <TAB>cs.add(cpu.CF == False) <TAB>cpu.ECX = cs.new_bitvec(32) <TAB>cs.add(cpu.ECX == 0xFFFFFFFF) <TAB>done = False <TAB>while not done: <TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <TAB><TAB><TAB>values = solver.get_all_values(cs, symbol) <TAB><TAB><TAB>self.assertEqual(len(values),<MASK> <TAB>condition = True <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E81, 8) == ord("\x0f")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E82, 8) == ord("B")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E83, 8) == ord("\xc1")) <TAB>condition = Operators.AND(condition, cpu.EIP == 0xF7FF3E84) <TAB>condition = Operators.AND(condition, cpu.ECX == 0xFFFFFFFF) <TAB>condition = Operators.AND(condition, cpu.EAX == 0x1) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition) <TAB><TAB>self.assertTrue(solver.check(temp_cs)) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition == False) <TAB><TAB>self.assertFalse(solver.check(temp_cs)) "
"1) <TAB><TAB><TAB>setattr(cpu, e.reg_name, values[0])"
"<TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <TAB><TAB><TAB>values = solver.get_all_values(cs, symbol) <TAB><TAB><TAB>self.assertEqual(len(values), 1) <MASK>"
"def test_CMOVB_15_symbolic(self): <TAB>"""Instruction CMOVB_15 <TAB>Groups: cmov <TAB>0xf7ff3e81: cmovb eax, ecx <TAB>""" <TAB>cs = ConstraintSet() <TAB>mem = SMemory32(cs) <TAB>cpu = I386Cpu(mem) <TAB>mem.mmap(0xF7FF3000, 0x1000, "rwx") <TAB>mem.write(0xF7FF3E81, "\x0fB\xc1") <TAB>cpu.EIP = 0xF7FF3E81 <TAB>cpu.EAX = cs.new_bitvec(32) <TAB>cs.add(cpu.EAX == 0x1) <TAB>cpu.CF = cs.new_bool() <TAB>cs.add(cpu.CF == False) <TAB>cpu.ECX = cs.new_bitvec(32) <TAB>cs.add(cpu.ECX == 0xFFFFFFFF) <TAB>done = False <TAB>while not done: <TAB><TAB>try: <TAB><TAB><TAB>cpu.execute() <TAB><TAB><TAB>done = True <TAB><TAB>except ConcretizeRegister as e: <TAB><TAB><TAB>symbol = getattr(cpu, e.reg_name) <TAB><TAB><TAB>values = solver.get_all_values(cs, symbol) <TAB><TAB><TAB>self.assertEqual(len(values), 1) <MASK> <TAB>condition = True <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E81, 8) == ord("\x0f")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E82, 8) == ord("B")) <TAB>condition = Operators.AND(condition, cpu.read_int(0xF7FF3E83, 8) == ord("\xc1")) <TAB>condition = Operators.AND(condition, cpu.EIP == 0xF7FF3E84) <TAB>condition = Operators.AND(condition, cpu.ECX == 0xFFFFFFFF) <TAB>condition = Operators.AND(condition, cpu.EAX == 0x1) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition) <TAB><TAB>self.assertTrue(solver.check(temp_cs)) <TAB>with cs as temp_cs: <TAB><TAB>temp_cs.add(condition == False) <TAB><TAB>self.assertFalse(solver.check(temp_cs)) "
"<TAB><TAB><TAB>setattr(cpu, e.reg_name, values[0])"
"def test_history_cache(self): <TAB>conversation_agent = self.get_pipeline() <TAB>conversation = Conversation( <TAB><TAB>"Why do you recommend it?", <TAB><TAB>past_user_inputs=["What's the last book you have read?"], <TAB><TAB>generated_responses=["b"], <TAB>) <TAB>with self.assertLogs("transformers", level="WARNING") as log: <TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <TAB><TAB>self.assertIn("63 is bigger than 0.9 * max_length: 64", log.output[1]) <TAB><TAB>self.assertIn("Setting `pad_token_id`", log.output[2]) <TAB>self.assertEqual(conversation._index, 1) <TAB>self.assertEqual( <TAB><TAB>conversation._history, <TAB><TAB>[ <TAB><TAB><TAB>87, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>116, <TAB><TAB><TAB>39, <TAB><TAB><TAB>115, <TAB><TAB><TAB>32, <TAB><TAB><TAB>116, <TAB><TAB><TAB>104, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>108, <TAB><TAB><TAB>97, <TAB><TAB><TAB>115, <TAB><TAB><TAB>116, <TAB><TAB><TAB>32, <TAB><TAB><TAB>98, <TAB><TAB><TAB>111, <TAB><TAB><TAB>111, <TAB><TAB><TAB>107, <TAB><TAB><TAB>32, <TAB><TAB><TAB>121, <TAB><TAB><TAB>111, <TAB><TAB><TAB>117, <TAB><TAB><TAB>32, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>118, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>114, <TAB><TAB><TAB>101, <TAB><TAB><TAB>97, <TAB><TAB><TAB>100, <TAB><TAB><TAB>63, <TAB><TAB><TAB>259, # EOS <TAB><TAB><TAB>98, # b <TAB><TAB><TAB>259, # EOS <TAB><TAB>], <TAB>) "
"<TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <TAB><TAB>self.assertIn("63 is bigger than 0.9 * max_length: 64", log.output[1]) <TAB><TAB>self.assertIn("Setting `pad_token_id`", log.output[2])"
"<TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <MASK>"
"def test_history_cache(self): <TAB>conversation_agent = self.get_pipeline() <TAB>conversation = Conversation( <TAB><TAB>"Why do you recommend it?", <TAB><TAB>past_user_inputs=["What's the last book you have read?"], <TAB><TAB>generated_responses=["b"], <TAB>) <TAB>with self.assertLogs("transformers", level="WARNING") as log: <TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <MASK> <TAB>self.assertEqual(conversation._index, 1) <TAB>self.assertEqual( <TAB><TAB>conversation._history, <TAB><TAB>[ <TAB><TAB><TAB>87, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>116, <TAB><TAB><TAB>39, <TAB><TAB><TAB>115, <TAB><TAB><TAB>32, <TAB><TAB><TAB>116, <TAB><TAB><TAB>104, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>108, <TAB><TAB><TAB>97, <TAB><TAB><TAB>115, <TAB><TAB><TAB>116, <TAB><TAB><TAB>32, <TAB><TAB><TAB>98, <TAB><TAB><TAB>111, <TAB><TAB><TAB>111, <TAB><TAB><TAB>107, <TAB><TAB><TAB>32, <TAB><TAB><TAB>121, <TAB><TAB><TAB>111, <TAB><TAB><TAB>117, <TAB><TAB><TAB>32, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>118, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>114, <TAB><TAB><TAB>101, <TAB><TAB><TAB>97, <TAB><TAB><TAB>100, <TAB><TAB><TAB>63, <TAB><TAB><TAB>259, # EOS <TAB><TAB><TAB>98, # b <TAB><TAB><TAB>259, # EOS <TAB><TAB>], <TAB>) "
"<TAB><TAB>self.assertIn("63 is bigger than 0.9 * max_length: 64", log.output[1]) <TAB><TAB>self.assertIn("Setting `pad_token_id`", log.output[2])"
"<TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <TAB><TAB>self.assertIn("63 is bigger than 0.9 * max_length: 64", log.output[1]) <MASK>"
"def test_history_cache(self): <TAB>conversation_agent = self.get_pipeline() <TAB>conversation = Conversation( <TAB><TAB>"Why do you recommend it?", <TAB><TAB>past_user_inputs=["What's the last book you have read?"], <TAB><TAB>generated_responses=["b"], <TAB>) <TAB>with self.assertLogs("transformers", level="WARNING") as log: <TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <TAB><TAB>self.assertIn("63 is bigger than 0.9 * max_length: 64", log.output[1]) <MASK> <TAB>self.assertEqual(conversation._index, 1) <TAB>self.assertEqual( <TAB><TAB>conversation._history, <TAB><TAB>[ <TAB><TAB><TAB>87, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>116, <TAB><TAB><TAB>39, <TAB><TAB><TAB>115, <TAB><TAB><TAB>32, <TAB><TAB><TAB>116, <TAB><TAB><TAB>104, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>108, <TAB><TAB><TAB>97, <TAB><TAB><TAB>115, <TAB><TAB><TAB>116, <TAB><TAB><TAB>32, <TAB><TAB><TAB>98, <TAB><TAB><TAB>111, <TAB><TAB><TAB>111, <TAB><TAB><TAB>107, <TAB><TAB><TAB>32, <TAB><TAB><TAB>121, <TAB><TAB><TAB>111, <TAB><TAB><TAB>117, <TAB><TAB><TAB>32, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>118, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>114, <TAB><TAB><TAB>101, <TAB><TAB><TAB>97, <TAB><TAB><TAB>100, <TAB><TAB><TAB>63, <TAB><TAB><TAB>259, # EOS <TAB><TAB><TAB>98, # b <TAB><TAB><TAB>259, # EOS <TAB><TAB>], <TAB>) "
"<TAB><TAB>self.assertIn("Setting `pad_token_id`", log.output[2])"
"<TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <TAB><TAB>self.assertIn("63 is bigger than 0.9 * max_length: 64", log.output[1]) <TAB><TAB>self.assertIn("Setting<MASK>"
"def test_history_cache(self): <TAB>conversation_agent = self.get_pipeline() <TAB>conversation = Conversation( <TAB><TAB>"Why do you recommend it?", <TAB><TAB>past_user_inputs=["What's the last book you have read?"], <TAB><TAB>generated_responses=["b"], <TAB>) <TAB>with self.assertLogs("transformers", level="WARNING") as log: <TAB><TAB>_ = conversation_agent(conversation, max_length=64) <TAB><TAB>self.assertEqual(len(log.output), 3) <TAB><TAB>self.assertIn( <TAB><TAB><TAB>"Cutting history off because it's too long (63 > 32) for underlying model", <TAB><TAB><TAB>log.output[0], <TAB><TAB>) <TAB><TAB>self.assertIn("63 is bigger than 0.9 * max_length: 64", log.output[1]) <TAB><TAB>self.assertIn("Setting<MASK> <TAB>self.assertEqual(conversation._index, 1) <TAB>self.assertEqual( <TAB><TAB>conversation._history, <TAB><TAB>[ <TAB><TAB><TAB>87, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>116, <TAB><TAB><TAB>39, <TAB><TAB><TAB>115, <TAB><TAB><TAB>32, <TAB><TAB><TAB>116, <TAB><TAB><TAB>104, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>108, <TAB><TAB><TAB>97, <TAB><TAB><TAB>115, <TAB><TAB><TAB>116, <TAB><TAB><TAB>32, <TAB><TAB><TAB>98, <TAB><TAB><TAB>111, <TAB><TAB><TAB>111, <TAB><TAB><TAB>107, <TAB><TAB><TAB>32, <TAB><TAB><TAB>121, <TAB><TAB><TAB>111, <TAB><TAB><TAB>117, <TAB><TAB><TAB>32, <TAB><TAB><TAB>104, <TAB><TAB><TAB>97, <TAB><TAB><TAB>118, <TAB><TAB><TAB>101, <TAB><TAB><TAB>32, <TAB><TAB><TAB>114, <TAB><TAB><TAB>101, <TAB><TAB><TAB>97, <TAB><TAB><TAB>100, <TAB><TAB><TAB>63, <TAB><TAB><TAB>259, # EOS <TAB><TAB><TAB>98, # b <TAB><TAB><TAB>259, # EOS <TAB><TAB>], <TAB>) "
"`pad_token_id`", log.output[2])"
"def test_train_process(self): <TAB>self._metric.train() <TAB>result = self._metric.process_train(self._state) <TAB>for i in range(0, len(self._targets)): <TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+ str(result[i]) <TAB><TAB><TAB>+ " expected: " <TAB><TAB><TAB>+ str(self._targets[i]) <TAB><TAB><TAB>+ " in: " <TAB><TAB><TAB>+ str(result), <TAB><TAB>) "
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+ str(result[i]) <TAB><TAB><TAB>+ " expected: " <TAB><TAB><TAB>+ str(self._targets[i]) <TAB><TAB><TAB>+ " in: " <TAB><TAB><TAB>+ str(result), <TAB><TAB>)"
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+<MASK>"
"def test_train_process(self): <TAB>self._metric.train() <TAB>result = self._metric.process_train(self._state) <TAB>for i in range(0, len(self._targets)): <TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+<MASK> "
"str(result[i]) <TAB><TAB><TAB>+ " expected: " <TAB><TAB><TAB>+ str(self._targets[i]) <TAB><TAB><TAB>+ " in: " <TAB><TAB><TAB>+ str(result), <TAB><TAB>)"
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+ str(result[i]) <TAB><TAB><TAB>+ " expected: " <TAB><TAB><TAB>+ str(self._targets[i]) <MASK>"
"def test_train_process(self): <TAB>self._metric.train() <TAB>result = self._metric.process_train(self._state) <TAB>for i in range(0, len(self._targets)): <TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+ str(result[i]) <TAB><TAB><TAB>+ " expected: " <TAB><TAB><TAB>+ str(self._targets[i]) <MASK> "
"<TAB><TAB><TAB>+ " in: " <TAB><TAB><TAB>+ str(result), <TAB><TAB>)"
"<TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+ str(result[i]) <TAB><TAB><TAB>+ " expected: " <TAB><TAB><TAB>+ str(self._targets[i]) <TAB><TAB><TAB>+ " in: " <MASK>"
"def test_train_process(self): <TAB>self._metric.train() <TAB>result = self._metric.process_train(self._state) <TAB>for i in range(0, len(self._targets)): <TAB><TAB>self.assertEqual( <TAB><TAB><TAB>result[i], <TAB><TAB><TAB>self._targets[i], <TAB><TAB><TAB>msg="returned: " <TAB><TAB><TAB>+ str(result[i]) <TAB><TAB><TAB>+ " expected: " <TAB><TAB><TAB>+ str(self._targets[i]) <TAB><TAB><TAB>+ " in: " <MASK> "
"<TAB><TAB><TAB>+ str(result), <TAB><TAB>)"
"def parse_help_menu(data, start, end): <TAB>""" <TAB>parse the help menu from a certain string to a certain string <TAB>and return the parsed help <TAB>""" <TAB>try: <TAB><TAB># DO YOU SEE HOW MUCH EASIER IT IS WITH <TAB><TAB># PYTHON2 ?! <TAB><TAB>start_index = data.index(start) <TAB><TAB>end_index = data.index(end) <TAB><TAB>retval = data[start_index:end_index].strip() <TAB>except TypeError: <TAB><TAB># python3 is stupid and likes `bytes` because why tf not? <TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data = str(data) <TAB><TAB># and then we gotta store into a temporary list <TAB><TAB>tmp = data[start_index : end_index + plus_another] <TAB><TAB># split the list into another list because of escapes <TAB><TAB># join that list with a new line and finally get the <TAB><TAB># retval out of it. Because that makes PERFECT sense <TAB><TAB>retval = "\n".join(tmp.split("\\n")).replace("n\n", "") <TAB>return retval "
"<TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data = str(data) <TAB><TAB># and then we gotta store into a temporary list <TAB><TAB>tmp = data[start_index : end_index + plus_another] <TAB><TAB># split the list into another list because of escapes <TAB><TAB># join that list with a new line and finally get the <TAB><TAB># retval out of it. Because that makes PERFECT sense <TAB><TAB>retval = "\n".join(tmp.split("\\n")).replace("n\n", "")"
"<TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data =<MASK>"
"def parse_help_menu(data, start, end): <TAB>""" <TAB>parse the help menu from a certain string to a certain string <TAB>and return the parsed help <TAB>""" <TAB>try: <TAB><TAB># DO YOU SEE HOW MUCH EASIER IT IS WITH <TAB><TAB># PYTHON2 ?! <TAB><TAB>start_index = data.index(start) <TAB><TAB>end_index = data.index(end) <TAB><TAB>retval = data[start_index:end_index].strip() <TAB>except TypeError: <TAB><TAB># python3 is stupid and likes `bytes` because why tf not? <TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data =<MASK> <TAB>return retval "
"str(data) <TAB><TAB>tmp = data[start_index : end_index + plus_another] <TAB><TAB>retval = "\n".join(tmp.split("\\n")).replace("n\n", "")"
"<TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data = str(data) <TAB><TAB># and then we gotta store into a temporary list <TAB><TAB>tmp = data[start_index : end_index + plus_another] <TAB><TAB># split the list into another list because of escapes <TAB><TAB># join that list with a new line and finally get the <TAB><TAB># retval out of it. Because that makes PERFECT sense <MASK>"
"def parse_help_menu(data, start, end): <TAB>""" <TAB>parse the help menu from a certain string to a certain string <TAB>and return the parsed help <TAB>""" <TAB>try: <TAB><TAB># DO YOU SEE HOW MUCH EASIER IT IS WITH <TAB><TAB># PYTHON2 ?! <TAB><TAB>start_index = data.index(start) <TAB><TAB>end_index = data.index(end) <TAB><TAB>retval = data[start_index:end_index].strip() <TAB>except TypeError: <TAB><TAB># python3 is stupid and likes `bytes` because why tf not? <TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data = str(data) <TAB><TAB># and then we gotta store into a temporary list <TAB><TAB>tmp = data[start_index : end_index + plus_another] <TAB><TAB># split the list into another list because of escapes <TAB><TAB># join that list with a new line and finally get the <TAB><TAB># retval out of it. Because that makes PERFECT sense <MASK> <TAB>return retval "
"<TAB><TAB>retval = "\n".join(tmp.split("\\n")).replace("n\n", "")"
"<TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data = str(data) <TAB><TAB># and then we gotta store into a temporary list <TAB><TAB>tmp = data[start_index : end_index + plus_another] <TAB><TAB># split the list into another list because of escapes <TAB><TAB># join that list with a new line and finally get the <TAB><TAB># retval out of it. Because that makes PERFECT sense <TAB><TAB>retval =<MASK>"
"def parse_help_menu(data, start, end): <TAB>""" <TAB>parse the help menu from a certain string to a certain string <TAB>and return the parsed help <TAB>""" <TAB>try: <TAB><TAB># DO YOU SEE HOW MUCH EASIER IT IS WITH <TAB><TAB># PYTHON2 ?! <TAB><TAB>start_index = data.index(start) <TAB><TAB>end_index = data.index(end) <TAB><TAB>retval = data[start_index:end_index].strip() <TAB>except TypeError: <TAB><TAB># python3 is stupid and likes `bytes` because why tf not? <TAB><TAB>plus, plus_another = 60, 8 <TAB><TAB># so now we gotta add 60 in order to get the last line from the last command <TAB><TAB># out of the way <TAB><TAB>start_index = data.decode().index(start) + plus <TAB><TAB>end_index = data.decode().index(end) <TAB><TAB># and then we gotta convert back <TAB><TAB>data = str(data) <TAB><TAB># and then we gotta store into a temporary list <TAB><TAB>tmp = data[start_index : end_index + plus_another] <TAB><TAB># split the list into another list because of escapes <TAB><TAB># join that list with a new line and finally get the <TAB><TAB># retval out of it. Because that makes PERFECT sense <TAB><TAB>retval =<MASK> <TAB>return retval "
""\n".join(tmp.split("\\n")).replace("n\n", "")"
"def _split_private( <TAB>prot: Pond, x: PondPrivateTensor, num_split: Union[int, list], axis: int = 0 ) -> List[PondPrivateTensor]: <TAB>x0, x1 = x.unwrapped <TAB>with tf.compat.v1.name_scope("split"): <TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <TAB><TAB>return [ <TAB><TAB><TAB>PondPrivateTensor(prot, y0, y1, x.is_scaled) for y0, y1 in zip(ys0, ys1) <TAB><TAB>] "
"<TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <TAB><TAB>return [ <TAB><TAB><TAB>PondPrivateTensor(prot, y0, y1, x.is_scaled) for y0, y1 in zip(ys0, ys1) <TAB><TAB>]"
"<TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <MASK>"
"def _split_private( <TAB>prot: Pond, x: PondPrivateTensor, num_split: Union[int, list], axis: int = 0 ) -> List[PondPrivateTensor]: <TAB>x0, x1 = x.unwrapped <TAB>with tf.compat.v1.name_scope("split"): <TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <MASK> "
"<TAB><TAB>return [ <TAB><TAB><TAB>PondPrivateTensor(prot, y0, y1, x.is_scaled) for y0, y1 in zip(ys0, ys1) <TAB><TAB>]"
"<TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <TAB><TAB>return [ <TAB><TAB><TAB>PondPrivateTensor(prot, y0, y1,<MASK>"
"def _split_private( <TAB>prot: Pond, x: PondPrivateTensor, num_split: Union[int, list], axis: int = 0 ) -> List[PondPrivateTensor]: <TAB>x0, x1 = x.unwrapped <TAB>with tf.compat.v1.name_scope("split"): <TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <TAB><TAB>return [ <TAB><TAB><TAB>PondPrivateTensor(prot, y0, y1,<MASK> "
"x.is_scaled) for y0, y1 in zip(ys0, ys1) <TAB><TAB>]"
"<TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <TAB><TAB>return [ <TAB><TAB><TAB>PondPrivateTensor(prot, y0, y1, x.is_scaled) for y0, y1 in<MASK>"
"def _split_private( <TAB>prot: Pond, x: PondPrivateTensor, num_split: Union[int, list], axis: int = 0 ) -> List[PondPrivateTensor]: <TAB>x0, x1 = x.unwrapped <TAB>with tf.compat.v1.name_scope("split"): <TAB><TAB>with tf.device(prot.server_0.device_name): <TAB><TAB><TAB>ys0 = x0.split(num_split, axis=axis) <TAB><TAB>with tf.device(prot.server_1.device_name): <TAB><TAB><TAB>ys1 = x1.split(num_split, axis=axis) <TAB><TAB>return [ <TAB><TAB><TAB>PondPrivateTensor(prot, y0, y1, x.is_scaled) for y0, y1 in<MASK> "
"zip(ys0, ys1) <TAB><TAB>]"
"def forward(self, x: torch.Tensor) -> torch.Tensor: <TAB># number of tokens is variable <TAB>if not self.embedded_tokens: <TAB><TAB># x of shape [batch_size, number of tokens] <TAB><TAB>input = self.embedding(x) <TAB><TAB>input = input.permute(0, 2, 1) <TAB>else: <TAB><TAB># x of shape [batch_size, number of tokens, embedding_size] <TAB><TAB>input = x.permute(0, 2, 1) <TAB># input of [batch size, embedding size, number of tokens] <TAB>outputs = [] <TAB>for i in range(len(self.kernel_sizes_cnn)): <TAB><TAB># convolutional input should be of shape [batch_size, embedding_size, number of tokens] <TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output = getattr(self, "pool_" + str(i))(output) <TAB><TAB>output = output.squeeze(-1) <TAB><TAB># output of shape [batch_size, out] <TAB><TAB>outputs.append(output) <TAB>output = torch.cat(outputs, dim=-1) <TAB>output = self.dropout(output) <TAB>output = self.dense(output) <TAB>output = self.relu_dense(output) <TAB>output = self.dropout(output) <TAB>output = self.final_dense(output) <TAB>return output "
"<TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output = getattr(self, "pool_" + str(i))(output) <TAB><TAB>output = output.squeeze(-1) <TAB><TAB># output of shape [batch_size, out] <TAB><TAB>outputs.append(output)"
"<TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output =<MASK>"
"def forward(self, x: torch.Tensor) -> torch.Tensor: <TAB># number of tokens is variable <TAB>if not self.embedded_tokens: <TAB><TAB># x of shape [batch_size, number of tokens] <TAB><TAB>input = self.embedding(x) <TAB><TAB>input = input.permute(0, 2, 1) <TAB>else: <TAB><TAB># x of shape [batch_size, number of tokens, embedding_size] <TAB><TAB>input = x.permute(0, 2, 1) <TAB># input of [batch size, embedding size, number of tokens] <TAB>outputs = [] <TAB>for i in range(len(self.kernel_sizes_cnn)): <TAB><TAB># convolutional input should be of shape [batch_size, embedding_size, number of tokens] <TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output =<MASK> <TAB>output = torch.cat(outputs, dim=-1) <TAB>output = self.dropout(output) <TAB>output = self.dense(output) <TAB>output = self.relu_dense(output) <TAB>output = self.dropout(output) <TAB>output = self.final_dense(output) <TAB>return output "
"getattr(self, "pool_" + str(i))(output) <TAB><TAB>output = output.squeeze(-1) <TAB><TAB># output of shape [batch_size, out] <TAB><TAB>outputs.append(output)"
"<TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output = getattr(self, "pool_" + str(i))(output) <TAB><TAB>output = output.squeeze(-1) <MASK>"
"def forward(self, x: torch.Tensor) -> torch.Tensor: <TAB># number of tokens is variable <TAB>if not self.embedded_tokens: <TAB><TAB># x of shape [batch_size, number of tokens] <TAB><TAB>input = self.embedding(x) <TAB><TAB>input = input.permute(0, 2, 1) <TAB>else: <TAB><TAB># x of shape [batch_size, number of tokens, embedding_size] <TAB><TAB>input = x.permute(0, 2, 1) <TAB># input of [batch size, embedding size, number of tokens] <TAB>outputs = [] <TAB>for i in range(len(self.kernel_sizes_cnn)): <TAB><TAB># convolutional input should be of shape [batch_size, embedding_size, number of tokens] <TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output = getattr(self, "pool_" + str(i))(output) <TAB><TAB>output = output.squeeze(-1) <MASK> <TAB>output = torch.cat(outputs, dim=-1) <TAB>output = self.dropout(output) <TAB>output = self.dense(output) <TAB>output = self.relu_dense(output) <TAB>output = self.dropout(output) <TAB>output = self.final_dense(output) <TAB>return output "
"<TAB><TAB># output of shape [batch_size, out] <TAB><TAB>outputs.append(output)"
"<TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output = getattr(self, "pool_" + str(i))(output) <TAB><TAB>output = output.squeeze(-1) <TAB><TAB># output of shape<MASK>"
"def forward(self, x: torch.Tensor) -> torch.Tensor: <TAB># number of tokens is variable <TAB>if not self.embedded_tokens: <TAB><TAB># x of shape [batch_size, number of tokens] <TAB><TAB>input = self.embedding(x) <TAB><TAB>input = input.permute(0, 2, 1) <TAB>else: <TAB><TAB># x of shape [batch_size, number of tokens, embedding_size] <TAB><TAB>input = x.permute(0, 2, 1) <TAB># input of [batch size, embedding size, number of tokens] <TAB>outputs = [] <TAB>for i in range(len(self.kernel_sizes_cnn)): <TAB><TAB># convolutional input should be of shape [batch_size, embedding_size, number of tokens] <TAB><TAB>output = getattr(self, "conv_" + str(i))(input) <TAB><TAB>output = getattr(self, "bn_" + str(i))(output) <TAB><TAB>output = getattr(self, "relu_" + str(i))(output) <TAB><TAB>output = getattr(self, "pool_" + str(i))(output) <TAB><TAB>output = output.squeeze(-1) <TAB><TAB># output of shape<MASK> <TAB>output = torch.cat(outputs, dim=-1) <TAB>output = self.dropout(output) <TAB>output = self.dense(output) <TAB>output = self.relu_dense(output) <TAB>output = self.dropout(output) <TAB>output = self.final_dense(output) <TAB>return output "
"[batch_size, out] <TAB><TAB>outputs.append(output)"
"def test_basics(self): <TAB>"""Note that this tests code path in koDocument._guessFileIndentation""" <TAB>globalprefs = ( <TAB><TAB>components.classes["@activestate.com/koPrefService;1"] <TAB><TAB>.getService(components.interfaces.koIPrefService) <TAB><TAB>.prefs <TAB>) <TAB>defaultUseTabs = globalprefs.getBoolean("useTabs") <TAB>defaultIndentWidth = globalprefs.getLong("indentWidth") <TAB>defaultTabWidth = globalprefs.getLong("tabWidth") <TAB>manifest = [ <TAB><TAB>{ <TAB><TAB><TAB>"name": "empty text", <TAB><TAB><TAB>"content": "", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": defaultUseTabs, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents == space indents (choose default) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Tabs equals spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents > space indents (choose tabs) <TAB><TAB>{ <TAB><TAB><TAB>"name": "More tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n\t\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": True, <TAB><TAB><TAB>"indentWidth": defaultTabWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents < space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Less tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n bar\n\tbaz\n meat\n popsicle\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># No tab indents, but space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Spaces not tabs", <TAB><TAB><TAB>"content": "\nokay\n foo\n \n bar\n baz\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB>] <TAB>for entry in manifest: <TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.tabWidth, <TAB><TAB><TAB>entry["tabWidth"], <TAB><TAB><TAB>"Failed tabWidth test for %r" % (entry["name"]), <TAB><TAB>) "
"<TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.tabWidth, <TAB><TAB><TAB>entry["tabWidth"], <TAB><TAB><TAB>"Failed tabWidth test for %r" % (entry["name"]), <TAB><TAB>)"
"<TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <MASK>"
"def test_basics(self): <TAB>"""Note that this tests code path in koDocument._guessFileIndentation""" <TAB>globalprefs = ( <TAB><TAB>components.classes["@activestate.com/koPrefService;1"] <TAB><TAB>.getService(components.interfaces.koIPrefService) <TAB><TAB>.prefs <TAB>) <TAB>defaultUseTabs = globalprefs.getBoolean("useTabs") <TAB>defaultIndentWidth = globalprefs.getLong("indentWidth") <TAB>defaultTabWidth = globalprefs.getLong("tabWidth") <TAB>manifest = [ <TAB><TAB>{ <TAB><TAB><TAB>"name": "empty text", <TAB><TAB><TAB>"content": "", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": defaultUseTabs, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents == space indents (choose default) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Tabs equals spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents > space indents (choose tabs) <TAB><TAB>{ <TAB><TAB><TAB>"name": "More tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n\t\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": True, <TAB><TAB><TAB>"indentWidth": defaultTabWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents < space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Less tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n bar\n\tbaz\n meat\n popsicle\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># No tab indents, but space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Spaces not tabs", <TAB><TAB><TAB>"content": "\nokay\n foo\n \n bar\n baz\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB>] <TAB>for entry in manifest: <TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <MASK> "
"<TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.tabWidth, <TAB><TAB><TAB>entry["tabWidth"], <TAB><TAB><TAB>"Failed tabWidth test for %r" % (entry["name"]), <TAB><TAB>)"
"<TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.tabWidth, <TAB><TAB><TAB>entry["tabWidth"], <MASK>"
"def test_basics(self): <TAB>"""Note that this tests code path in koDocument._guessFileIndentation""" <TAB>globalprefs = ( <TAB><TAB>components.classes["@activestate.com/koPrefService;1"] <TAB><TAB>.getService(components.interfaces.koIPrefService) <TAB><TAB>.prefs <TAB>) <TAB>defaultUseTabs = globalprefs.getBoolean("useTabs") <TAB>defaultIndentWidth = globalprefs.getLong("indentWidth") <TAB>defaultTabWidth = globalprefs.getLong("tabWidth") <TAB>manifest = [ <TAB><TAB>{ <TAB><TAB><TAB>"name": "empty text", <TAB><TAB><TAB>"content": "", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": defaultUseTabs, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents == space indents (choose default) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Tabs equals spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents > space indents (choose tabs) <TAB><TAB>{ <TAB><TAB><TAB>"name": "More tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n\t\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": True, <TAB><TAB><TAB>"indentWidth": defaultTabWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents < space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Less tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n bar\n\tbaz\n meat\n popsicle\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># No tab indents, but space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Spaces not tabs", <TAB><TAB><TAB>"content": "\nokay\n foo\n \n bar\n baz\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB>] <TAB>for entry in manifest: <TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.tabWidth, <TAB><TAB><TAB>entry["tabWidth"], <MASK> "
"<TAB><TAB><TAB>"Failed tabWidth test for %r" % (entry["name"]), <TAB><TAB>)"
"<TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.tabWidth, <TAB><TAB><TAB>entry["tabWidth"], <TAB><TAB><TAB>"Failed tabWidth test for<MASK>"
"def test_basics(self): <TAB>"""Note that this tests code path in koDocument._guessFileIndentation""" <TAB>globalprefs = ( <TAB><TAB>components.classes["@activestate.com/koPrefService;1"] <TAB><TAB>.getService(components.interfaces.koIPrefService) <TAB><TAB>.prefs <TAB>) <TAB>defaultUseTabs = globalprefs.getBoolean("useTabs") <TAB>defaultIndentWidth = globalprefs.getLong("indentWidth") <TAB>defaultTabWidth = globalprefs.getLong("tabWidth") <TAB>manifest = [ <TAB><TAB>{ <TAB><TAB><TAB>"name": "empty text", <TAB><TAB><TAB>"content": "", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": defaultUseTabs, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents == space indents (choose default) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Tabs equals spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents > space indents (choose tabs) <TAB><TAB>{ <TAB><TAB><TAB>"name": "More tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n \n\tbar\n \n\t\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": True, <TAB><TAB><TAB>"indentWidth": defaultTabWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># Tab indents < space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Less tabs than spaces", <TAB><TAB><TAB>"content": "\n\tfoo\n bar\n\tbaz\n meat\n popsicle\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": 4, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB><TAB># No tab indents, but space indents (choose spaces) <TAB><TAB>{ <TAB><TAB><TAB>"name": "Spaces not tabs", <TAB><TAB><TAB>"content": "\nokay\n foo\n \n bar\n baz\n", <TAB><TAB><TAB>"encoding": "utf-8", <TAB><TAB><TAB>"useTabs": False, <TAB><TAB><TAB>"indentWidth": defaultIndentWidth, <TAB><TAB><TAB>"tabWidth": defaultTabWidth, <TAB><TAB>}, <TAB>] <TAB>for entry in manifest: <TAB><TAB>koDoc = self._koDocUntitled() <TAB><TAB>koDoc.setBufferAndEncoding(entry["content"], entry["encoding"]) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.useTabs, <TAB><TAB><TAB>entry["useTabs"], <TAB><TAB><TAB>"Failed useTabs test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.indentWidth, <TAB><TAB><TAB>entry["indentWidth"], <TAB><TAB><TAB>"Failed indentWidth test for %r" % (entry["name"]), <TAB><TAB>) <TAB><TAB>self.assertEquals( <TAB><TAB><TAB>koDoc.tabWidth, <TAB><TAB><TAB>entry["tabWidth"], <TAB><TAB><TAB>"Failed tabWidth test for<MASK> "
"%r" % (entry["name"]), <TAB><TAB>)"
"def test_call(): <TAB>"""Calling a command should result in an http request.""" <TAB>s = SoapMessage( <TAB><TAB>endpoint="http://endpoint.example.com", <TAB><TAB>method="getData", <TAB><TAB>parameters=[("one", "1")], <TAB><TAB>http_headers={"user-agent": "sonos"}, <TAB><TAB>soap_action="ACTION", <TAB><TAB>soap_header="<a_header>data</a_header>", <TAB><TAB>namespace="http://namespace.com", <TAB><TAB>other_arg=4, <TAB>) <TAB>response = mock.MagicMock() <TAB>response.headers = {} <TAB>response.status_code = 200 <TAB>response.content = DUMMY_VALID_RESPONSE <TAB>with mock.patch("requests.post", return_value=response) as fake_post: <TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml; charset="utf-8"', <TAB><TAB><TAB><TAB>"user-agent": "sonos", <TAB><TAB><TAB>}, <TAB><TAB><TAB>data=mock.ANY, <TAB><TAB><TAB>other_arg=4, <TAB><TAB>) "
"<TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml; charset="utf-8"', <TAB><TAB><TAB><TAB>"user-agent": "sonos", <TAB><TAB><TAB>}, <TAB><TAB><TAB>data=mock.ANY, <TAB><TAB><TAB>other_arg=4, <TAB><TAB>)"
"<TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml;<MASK>"
"def test_call(): <TAB>"""Calling a command should result in an http request.""" <TAB>s = SoapMessage( <TAB><TAB>endpoint="http://endpoint.example.com", <TAB><TAB>method="getData", <TAB><TAB>parameters=[("one", "1")], <TAB><TAB>http_headers={"user-agent": "sonos"}, <TAB><TAB>soap_action="ACTION", <TAB><TAB>soap_header="<a_header>data</a_header>", <TAB><TAB>namespace="http://namespace.com", <TAB><TAB>other_arg=4, <TAB>) <TAB>response = mock.MagicMock() <TAB>response.headers = {} <TAB>response.status_code = 200 <TAB>response.content = DUMMY_VALID_RESPONSE <TAB>with mock.patch("requests.post", return_value=response) as fake_post: <TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml;<MASK> "
"charset="utf-8"', <TAB><TAB><TAB><TAB>"user-agent": "sonos", <TAB><TAB><TAB>}, <TAB><TAB><TAB>data=mock.ANY, <TAB><TAB><TAB>other_arg=4, <TAB><TAB>)"
"<TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml; charset="utf-8"', <TAB><TAB><TAB><TAB>"user-agent": "sonos", <TAB><TAB><TAB>}, <MASK>"
"def test_call(): <TAB>"""Calling a command should result in an http request.""" <TAB>s = SoapMessage( <TAB><TAB>endpoint="http://endpoint.example.com", <TAB><TAB>method="getData", <TAB><TAB>parameters=[("one", "1")], <TAB><TAB>http_headers={"user-agent": "sonos"}, <TAB><TAB>soap_action="ACTION", <TAB><TAB>soap_header="<a_header>data</a_header>", <TAB><TAB>namespace="http://namespace.com", <TAB><TAB>other_arg=4, <TAB>) <TAB>response = mock.MagicMock() <TAB>response.headers = {} <TAB>response.status_code = 200 <TAB>response.content = DUMMY_VALID_RESPONSE <TAB>with mock.patch("requests.post", return_value=response) as fake_post: <TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml; charset="utf-8"', <TAB><TAB><TAB><TAB>"user-agent": "sonos", <TAB><TAB><TAB>}, <MASK> "
"<TAB><TAB><TAB>data=mock.ANY, <TAB><TAB><TAB>other_arg=4, <TAB><TAB>)"
"<TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml; charset="utf-8"', <TAB><TAB><TAB><TAB>"user-agent": "sonos", <TAB><TAB><TAB>}, <TAB><TAB><TAB>data=mock.ANY, <MASK>"
"def test_call(): <TAB>"""Calling a command should result in an http request.""" <TAB>s = SoapMessage( <TAB><TAB>endpoint="http://endpoint.example.com", <TAB><TAB>method="getData", <TAB><TAB>parameters=[("one", "1")], <TAB><TAB>http_headers={"user-agent": "sonos"}, <TAB><TAB>soap_action="ACTION", <TAB><TAB>soap_header="<a_header>data</a_header>", <TAB><TAB>namespace="http://namespace.com", <TAB><TAB>other_arg=4, <TAB>) <TAB>response = mock.MagicMock() <TAB>response.headers = {} <TAB>response.status_code = 200 <TAB>response.content = DUMMY_VALID_RESPONSE <TAB>with mock.patch("requests.post", return_value=response) as fake_post: <TAB><TAB>result = s.call() <TAB><TAB>assert XML.tostring(result) <TAB><TAB>fake_post.assert_called_once_with( <TAB><TAB><TAB>"http://endpoint.example.com", <TAB><TAB><TAB>headers={ <TAB><TAB><TAB><TAB>"SOAPACTION": '"ACTION"', <TAB><TAB><TAB><TAB>"Content-Type": 'text/xml; charset="utf-8"', <TAB><TAB><TAB><TAB>"user-agent": "sonos", <TAB><TAB><TAB>}, <TAB><TAB><TAB>data=mock.ANY, <MASK> "
"<TAB><TAB><TAB>other_arg=4, <TAB><TAB>)"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>) <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>) <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>)"
"<TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <MASK>"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <MASK> <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>) <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>)"
"<TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <MASK>"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <MASK> <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>) <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>)"
"<TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <MASK>"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <MASK> <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>) <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>)"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>) <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>) <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>)"
"<TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <MASK>"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>) <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <MASK> <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>)"
"<TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <MASK>"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>) <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <MASK> <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>)"
"<TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <MASK>"
"def _test_copy_image_to_volume( <TAB>self, <TAB>extend_backing, <TAB>vops, <TAB>fetch_stream_optimized_image, <TAB>create_volume_from_non_stream_opt_image, <TAB>validate_image_disk_type, <TAB>validate_image_adapter_type, <TAB>get_adapter_type, <TAB>validate_disk_format, <TAB>get_disk_type, <TAB>vmware_disk_type="streamOptimized", <TAB>backing_disk_size=VOL_SIZE, <TAB>call_extend_backing=False, <TAB>container_format="bare", ): <TAB>image_service = mock.Mock() <TAB>image_meta = self._create_image_meta( <TAB><TAB>vmware_disktype=vmware_disk_type, container_format=container_format <TAB>) <TAB>image_service.show.return_value = image_meta <TAB>backing = mock.sentinel.backing <TAB>vops.get_backing.return_value = backing <TAB>vops.get_disk_size.return_value = backing_disk_size * units.Gi <TAB>disk_type = mock.sentinel.disk_type <TAB>get_disk_type.return_value = disk_type <TAB>context = mock.sentinel.context <TAB>volume = self._create_volume_dict() <TAB>image_id = mock.sentinel.image_id <TAB>self._driver.copy_image_to_volume(context, volume, image_service, image_id) <TAB>validate_disk_format.assert_called_once_with(image_meta["disk_format"]) <TAB>validate_image_disk_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_disktype"] <TAB>) <TAB>validate_image_adapter_type.assert_called_once_with( <TAB><TAB>image_meta["properties"]["vmware_adaptertype"] <TAB>) <TAB>if vmware_disk_type == "streamOptimized": <TAB><TAB>fetch_stream_optimized_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <TAB><TAB>) <TAB>else: <TAB><TAB>create_volume_from_non_stream_opt_image.assert_called_once_with( <TAB><TAB><TAB>context, <TAB><TAB><TAB>volume, <TAB><TAB><TAB>image_service, <TAB><TAB><TAB>image_id, <TAB><TAB><TAB>image_meta["size"], <TAB><TAB><TAB>image_meta["properties"]["vmware_adaptertype"], <MASK> <TAB>vops.get_disk_size.assert_called_once_with(backing) <TAB>if call_extend_backing: <TAB><TAB>extend_backing.assert_called_once_with(backing, volume["size"], disk_type) <TAB>else: <TAB><TAB>self.assertFalse(extend_backing.called) "
"<TAB><TAB><TAB>image_meta["properties"]["vmware_disktype"], <TAB><TAB>)"
"def get(self): <TAB>"""Return a table with ``batch_size`` number of rows. <TAB>:return: An instance of an Arrow table with exactly ``batch_size`` rows. <TAB>""" <TAB>assert not self.empty() <TAB># head_idx points to the next row in the buffer[0] batch to be consumed. <TAB># Accumulate selices/full batches until result_rows reaches desired batch_size. <TAB># Pop left of the deque once exhausted all rows there. <TAB>result = [] <TAB>result_rows = 0 <TAB>while result_rows < self._batch_size and self._cumulative_len > 0: <TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if head.num_rows == self._head_idx: <TAB><TAB><TAB>self._head_idx = 0 <TAB><TAB><TAB>self._buffer.popleft() <TAB><TAB><TAB>self._cumulative_len -= head.num_rows <TAB>return pa.Table.from_batches(result) "
"<TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if head.num_rows == self._head_idx: <TAB><TAB><TAB>self._head_idx = 0 <TAB><TAB><TAB>self._buffer.popleft() <TAB><TAB><TAB>self._cumulative_len -= head.num_rows"
"<TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if<MASK>"
"def get(self): <TAB>"""Return a table with ``batch_size`` number of rows. <TAB>:return: An instance of an Arrow table with exactly ``batch_size`` rows. <TAB>""" <TAB>assert not self.empty() <TAB># head_idx points to the next row in the buffer[0] batch to be consumed. <TAB># Accumulate selices/full batches until result_rows reaches desired batch_size. <TAB># Pop left of the deque once exhausted all rows there. <TAB>result = [] <TAB>result_rows = 0 <TAB>while result_rows < self._batch_size and self._cumulative_len > 0: <TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if<MASK> <TAB>return pa.Table.from_batches(result) "
"head.num_rows == self._head_idx: <TAB><TAB><TAB>self._head_idx = 0 <TAB><TAB><TAB>self._buffer.popleft() <TAB><TAB><TAB>self._cumulative_len -= head.num_rows"
"<TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if head.num_rows == self._head_idx: <TAB><TAB><TAB>self._head_idx = 0 <MASK>"
"def get(self): <TAB>"""Return a table with ``batch_size`` number of rows. <TAB>:return: An instance of an Arrow table with exactly ``batch_size`` rows. <TAB>""" <TAB>assert not self.empty() <TAB># head_idx points to the next row in the buffer[0] batch to be consumed. <TAB># Accumulate selices/full batches until result_rows reaches desired batch_size. <TAB># Pop left of the deque once exhausted all rows there. <TAB>result = [] <TAB>result_rows = 0 <TAB>while result_rows < self._batch_size and self._cumulative_len > 0: <TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if head.num_rows == self._head_idx: <TAB><TAB><TAB>self._head_idx = 0 <MASK> <TAB>return pa.Table.from_batches(result) "
"<TAB><TAB><TAB>self._buffer.popleft() <TAB><TAB><TAB>self._cumulative_len -= head.num_rows"
"<TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if head.num_rows == self._head_idx: <TAB><TAB><TAB>self._head_idx = 0 <TAB><TAB><TAB>self._buffer.popleft() <MASK>"
"def get(self): <TAB>"""Return a table with ``batch_size`` number of rows. <TAB>:return: An instance of an Arrow table with exactly ``batch_size`` rows. <TAB>""" <TAB>assert not self.empty() <TAB># head_idx points to the next row in the buffer[0] batch to be consumed. <TAB># Accumulate selices/full batches until result_rows reaches desired batch_size. <TAB># Pop left of the deque once exhausted all rows there. <TAB>result = [] <TAB>result_rows = 0 <TAB>while result_rows < self._batch_size and self._cumulative_len > 0: <TAB><TAB>head = self._buffer[0] <TAB><TAB>piece = head[self._head_idx : self._head_idx + self._batch_size - result_rows] <TAB><TAB>self._head_idx += piece.num_rows <TAB><TAB>result_rows += piece.num_rows <TAB><TAB>result.append(piece) <TAB><TAB>if head.num_rows == self._head_idx: <TAB><TAB><TAB>self._head_idx = 0 <TAB><TAB><TAB>self._buffer.popleft() <MASK> <TAB>return pa.Table.from_batches(result) "
"<TAB><TAB><TAB>self._cumulative_len -= head.num_rows"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return self.send_message(msg, "200 OK", start_response) <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>} <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"<TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return self.send_message(msg, "200 OK", start_response)"
"<TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg =<MASK>"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg =<MASK> <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>} <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"{"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return self.send_message(msg, "200 OK", start_response)"
"<TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <MASK>"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <MASK> <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>} <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"<TAB><TAB>return self.send_message(msg, "200 OK", start_response)"
"<TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return<MASK>"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return<MASK> <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>} <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"self.send_message(msg, "200 OK", start_response)"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return self.send_message(msg, "200 OK", start_response) <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>} <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"<TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>}"
"<TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record,<MASK>"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return self.send_message(msg, "200 OK", start_response) <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record,<MASK> <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>}"
"<TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <MASK>"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return self.send_message(msg, "200 OK", start_response) <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <MASK> <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"<TAB><TAB><TAB>"WARC-Date": record.rec_headers.get_header("WARC-Date"), <TAB><TAB>}"
"<TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date":<MASK>"
"def _put_record( <TAB>self, request_uri, input_buff, record_type, headers, params, start_response ): <TAB>if record_type == "stream": <TAB><TAB>if self.writer.write_stream_to_file(params, input_buff): <TAB><TAB><TAB>msg = {"success": "true"} <TAB><TAB>else: <TAB><TAB><TAB>msg = {"error_message": "upload_error"} <TAB><TAB>return self.send_message(msg, "200 OK", start_response) <TAB>req_stream = None <TAB>try: <TAB><TAB>req_stream = ReqWrapper(input_buff, headers, params, self.create_buff_func) <TAB><TAB>while True: <TAB><TAB><TAB>buff = req_stream.read() <TAB><TAB><TAB>if not buff: <TAB><TAB><TAB><TAB>break <TAB><TAB>content_type = headers.get("Content-Type") <TAB><TAB>payload_length = req_stream.out.tell() <TAB><TAB>req_stream.out.seek(0) <TAB><TAB>record = self.writer.create_warc_record( <TAB><TAB><TAB>uri=params["url"], <TAB><TAB><TAB>record_type=record_type, <TAB><TAB><TAB>payload=req_stream.out, <TAB><TAB><TAB>length=payload_length, <TAB><TAB><TAB>warc_content_type=content_type, <TAB><TAB><TAB>warc_headers_dict=req_stream.headers, <TAB><TAB>) <TAB><TAB>self.writer.write_record(record, params) <TAB><TAB>msg = { <TAB><TAB><TAB>"success": "true", <TAB><TAB><TAB>"WARC-Date":<MASK> <TAB>finally: <TAB><TAB>if req_stream: <TAB><TAB><TAB>no_except_close(req_stream.out) <TAB>return self.send_message(msg, "200 OK", start_response) "
"record.rec_headers.get_header("WARC-Date"), <TAB><TAB>}"
"def __build_queue(self, break_type, break_configs, break_time, break_duration): <TAB>""" <TAB>Build a queue of breaks. <TAB>""" <TAB>size = len(break_configs) <TAB>if 0 == size: <TAB><TAB># No breaks <TAB><TAB>return None <TAB>if self.__is_random_order: <TAB><TAB>breaks_order = random.sample(break_configs, size) <TAB>else: <TAB><TAB>breaks_order = break_configs <TAB>queue = [None] * size <TAB>for i, break_config in enumerate(breaks_order): <TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: " + str(break_config)) <TAB><TAB><TAB>continue <TAB><TAB>break_obj = Break(break_type, name, interval, duration, image, plugins) <TAB><TAB>queue[i] = break_obj <TAB>return queue "
"<TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: " + str(break_config)) <TAB><TAB><TAB>continue <TAB><TAB>break_obj = Break(break_type, name, interval, duration, image, plugins) <TAB><TAB>queue[i] = break_obj"
"<TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: "<MASK>"
"def __build_queue(self, break_type, break_configs, break_time, break_duration): <TAB>""" <TAB>Build a queue of breaks. <TAB>""" <TAB>size = len(break_configs) <TAB>if 0 == size: <TAB><TAB># No breaks <TAB><TAB>return None <TAB>if self.__is_random_order: <TAB><TAB>breaks_order = random.sample(break_configs, size) <TAB>else: <TAB><TAB>breaks_order = break_configs <TAB>queue = [None] * size <TAB>for i, break_config in enumerate(breaks_order): <TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: "<MASK> <TAB>return queue "
"+ str(break_config)) <TAB><TAB><TAB>continue <TAB><TAB>break_obj = Break(break_type, name, interval, duration, image, plugins) <TAB><TAB>queue[i] = break_obj"
"<TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: " + str(break_config)) <TAB><TAB><TAB>continue <TAB><TAB>break_obj = Break(break_type,<MASK>"
"def __build_queue(self, break_type, break_configs, break_time, break_duration): <TAB>""" <TAB>Build a queue of breaks. <TAB>""" <TAB>size = len(break_configs) <TAB>if 0 == size: <TAB><TAB># No breaks <TAB><TAB>return None <TAB>if self.__is_random_order: <TAB><TAB>breaks_order = random.sample(break_configs, size) <TAB>else: <TAB><TAB>breaks_order = break_configs <TAB>queue = [None] * size <TAB>for i, break_config in enumerate(breaks_order): <TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: " + str(break_config)) <TAB><TAB><TAB>continue <TAB><TAB>break_obj = Break(break_type,<MASK> <TAB>return queue "
"name, interval, duration, image, plugins) <TAB><TAB>queue[i] = break_obj"
"<TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: " + str(break_config)) <TAB><TAB><TAB>continue <TAB><TAB>break_obj = Break(break_type, name, interval, duration, image,<MASK>"
"def __build_queue(self, break_type, break_configs, break_time, break_duration): <TAB>""" <TAB>Build a queue of breaks. <TAB>""" <TAB>size = len(break_configs) <TAB>if 0 == size: <TAB><TAB># No breaks <TAB><TAB>return None <TAB>if self.__is_random_order: <TAB><TAB>breaks_order = random.sample(break_configs, size) <TAB>else: <TAB><TAB>breaks_order = break_configs <TAB>queue = [None] * size <TAB>for i, break_config in enumerate(breaks_order): <TAB><TAB>name = _(break_config["name"]) <TAB><TAB>duration = break_config.get("duration", break_duration) <TAB><TAB>image = break_config.get("image") <TAB><TAB>plugins = break_config.get("plugins", None) <TAB><TAB>interval = break_config.get("interval", break_time) <TAB><TAB># Validate time value <TAB><TAB>if not isinstance(duration, int) or duration <= 0: <TAB><TAB><TAB>logging.error("Invalid break duration in: " + str(break_config)) <TAB><TAB><TAB>continue <TAB><TAB>break_obj = Break(break_type, name, interval, duration, image,<MASK> <TAB>return queue "
"plugins) <TAB><TAB>queue[i] = break_obj"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) "
"<TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"<TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <MASK>"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <MASK> <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) "
"<TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"<TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <MASK>"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <MASK> <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) "
"<TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"<TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <MASK>"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <MASK> <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) "
"<TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) "
"<TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"<TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <MASK>"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <MASK> "
"<TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"<TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <MASK>"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <MASK> "
"<TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"<TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <MASK>"
"def walk2( <TAB>top, <TAB>topdown=True, <TAB>onerror=None, <TAB>followlinks=False, <TAB>ondecodeerror=None, <TAB>exclude_dirs=[], <TAB>dir_names_to_skip=[], ): <TAB>"""A version of `os.walk` that adds support for handling errors for <TAB>files that cannot be decoded with the default encoding. (See bug 82268.) <TAB>By default `UnicodeDecodeError`s from the os.listdir() call are <TAB>ignored. If optional arg 'ondecodeerror' is specified, it should be a <TAB>function; it will be called with one argument, the `UnicodeDecodeError` <TAB>instance. It can report the error to continue with the walk, or <TAB>raise the exception to abort the walk. <TAB>""" <TAB>try: <TAB><TAB>from scandir import scandir <TAB><TAB>return _walk2_via_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>) <TAB>except (ImportError, IOError): <TAB><TAB>return _walk2_wo_scandir( <TAB><TAB><TAB>top, <TAB><TAB><TAB>topdown, <TAB><TAB><TAB>onerror, <TAB><TAB><TAB>followlinks, <TAB><TAB><TAB>ondecodeerror, <TAB><TAB><TAB>exclude_dirs, <MASK> "
"<TAB><TAB><TAB>dir_names_to_skip, <TAB><TAB>)"
"def _find_dists(dists): <TAB>uploads = [] <TAB>for filename in dists: <TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if not files: <TAB><TAB><TAB>raise exceptions.InvalidDistribution( <TAB><TAB><TAB><TAB>"Cannot find file (or expand pattern): '%s'" % filename <TAB><TAB><TAB>) <TAB><TAB># Otherwise, files will be filenames that exist <TAB><TAB>uploads.extend(files) <TAB>return _group_wheel_files_first(uploads) "
"<TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if not files: <TAB><TAB><TAB>raise exceptions.InvalidDistribution( <TAB><TAB><TAB><TAB>"Cannot find file (or expand pattern): '%s'" % filename <TAB><TAB><TAB>) <TAB><TAB># Otherwise, files will be filenames that exist <TAB><TAB>uploads.extend(files)"
"<TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if<MASK>"
"def _find_dists(dists): <TAB>uploads = [] <TAB>for filename in dists: <TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if<MASK> <TAB>return _group_wheel_files_first(uploads) "
"not files: <TAB><TAB><TAB>raise exceptions.InvalidDistribution( <TAB><TAB><TAB><TAB>"Cannot find file (or expand pattern): '%s'" % filename <TAB><TAB><TAB>) <TAB><TAB>uploads.extend(files)"
"<TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if not files: <TAB><TAB><TAB>raise exceptions.InvalidDistribution( <TAB><TAB><TAB><TAB>"Cannot find file (or expand<MASK>"
"def _find_dists(dists): <TAB>uploads = [] <TAB>for filename in dists: <TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if not files: <TAB><TAB><TAB>raise exceptions.InvalidDistribution( <TAB><TAB><TAB><TAB>"Cannot find file (or expand<MASK> <TAB>return _group_wheel_files_first(uploads) "
"pattern): '%s'" % filename <TAB><TAB><TAB>) <TAB><TAB>uploads.extend(files)"
"<TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if not files: <TAB><TAB><TAB>raise exceptions.InvalidDistribution( <TAB><TAB><TAB><TAB>"Cannot find file (or expand pattern): '%s'" % filename <MASK>"
"def _find_dists(dists): <TAB>uploads = [] <TAB>for filename in dists: <TAB><TAB>if os.path.exists(filename): <TAB><TAB><TAB>uploads.append(filename) <TAB><TAB><TAB>continue <TAB><TAB># The filename didn't exist so it may be a glob <TAB><TAB>files = glob.glob(filename) <TAB><TAB># If nothing matches, files is [] <TAB><TAB>if not files: <TAB><TAB><TAB>raise exceptions.InvalidDistribution( <TAB><TAB><TAB><TAB>"Cannot find file (or expand pattern): '%s'" % filename <MASK> <TAB>return _group_wheel_files_first(uploads) "
"<TAB><TAB><TAB>) <TAB><TAB>uploads.extend(files)"
"def _replace_words(replacements, string): <TAB>"""Replace words with corresponding values in replacements dict. <TAB>Words must be separated by spaces or newlines. <TAB>""" <TAB>output_lines = [] <TAB>for line in string.split("\n"): <TAB><TAB>output_words = [] <TAB><TAB>for word in line.split(" "): <TAB><TAB><TAB>new_word = replacements.get(word, word) <TAB><TAB><TAB>output_words.append(new_word) <TAB><TAB>output_lines.append(output_words) <TAB>return "\n".join(" ".join(output_words) for output_words in output_lines) "
"<TAB><TAB>output_words = [] <TAB><TAB>for word in line.split(" "): <TAB><TAB><TAB>new_word = replacements.get(word, word) <TAB><TAB><TAB>output_words.append(new_word) <TAB><TAB>output_lines.append(output_words)"
"<TAB><TAB>output_words = [] <TAB><TAB>for word in<MASK>"
"def _replace_words(replacements, string): <TAB>"""Replace words with corresponding values in replacements dict. <TAB>Words must be separated by spaces or newlines. <TAB>""" <TAB>output_lines = [] <TAB>for line in string.split("\n"): <TAB><TAB>output_words = [] <TAB><TAB>for word in<MASK> <TAB>return "\n".join(" ".join(output_words) for output_words in output_lines) "
"line.split(" "): <TAB><TAB><TAB>new_word = replacements.get(word, word) <TAB><TAB><TAB>output_words.append(new_word) <TAB><TAB>output_lines.append(output_words)"
"<TAB><TAB>output_words = [] <TAB><TAB>for word in line.split(" "): <TAB><TAB><TAB>new_word = replacements.get(word, word) <MASK>"
"def _replace_words(replacements, string): <TAB>"""Replace words with corresponding values in replacements dict. <TAB>Words must be separated by spaces or newlines. <TAB>""" <TAB>output_lines = [] <TAB>for line in string.split("\n"): <TAB><TAB>output_words = [] <TAB><TAB>for word in line.split(" "): <TAB><TAB><TAB>new_word = replacements.get(word, word) <MASK> <TAB>return "\n".join(" ".join(output_words) for output_words in output_lines) "
"<TAB><TAB><TAB>output_words.append(new_word) <TAB><TAB>output_lines.append(output_words)"
"<TAB><TAB>output_words = [] <TAB><TAB>for word in line.split(" "): <TAB><TAB><TAB>new_word = replacements.get(word, word) <TAB><TAB><TAB>output_words.append(new_word) <MASK>"
"def _replace_words(replacements, string): <TAB>"""Replace words with corresponding values in replacements dict. <TAB>Words must be separated by spaces or newlines. <TAB>""" <TAB>output_lines = [] <TAB>for line in string.split("\n"): <TAB><TAB>output_words = [] <TAB><TAB>for word in line.split(" "): <TAB><TAB><TAB>new_word = replacements.get(word, word) <TAB><TAB><TAB>output_words.append(new_word) <MASK> <TAB>return "\n".join(" ".join(output_words) for output_words in output_lines) "
"<TAB><TAB>output_lines.append(output_words)"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args)"
"<TAB><TAB>stream<MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream<MASK> <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"= TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args)"
"<TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <MASK> <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args)"
"<TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <MASK> <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>tweet_iter = stream.user(**query_args)"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args)"
"<TAB><TAB>stream =<MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream =<MASK> <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args)"
"<TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <MASK> <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args)"
"<TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <MASK> <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>tweet_iter = stream.site(**query_args)"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample()"
"<TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <MASK> <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample()"
"<TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <MASK> <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample()"
"<TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <MASK> <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB><TAB>tweet_iter = stream.statuses.sample()"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet)) "
"<TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet))"
"<TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <MASK> "
"<TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet))"
"<TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <MASK> "
"<TAB><TAB>else: <TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet))"
"<TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <MASK>"
"def main(): <TAB>args = parse_arguments() <TAB># When using twitter stream you must authorize. <TAB>auth = OAuth(args.token, args.token_secret, args.consumer_key, args.consumer_secret) <TAB># These arguments are optional: <TAB>stream_args = dict( <TAB><TAB>timeout=args.timeout, <TAB><TAB>block=not args.no_block, <TAB><TAB>heartbeat_timeout=args.heartbeat_timeout, <TAB>) <TAB>query_args = dict() <TAB>if args.track_keywords: <TAB><TAB>query_args["track"] = args.track_keywords <TAB>if args.user_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="userstream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.user(**query_args) <TAB>elif args.site_stream: <TAB><TAB>stream = TwitterStream( <TAB><TAB><TAB>auth=auth, domain="sitestream.twitter.com", **stream_args <TAB><TAB>) <TAB><TAB>tweet_iter = stream.site(**query_args) <TAB>else: <TAB><TAB>stream = TwitterStream(auth=auth, **stream_args) <TAB><TAB>if args.track_keywords: <TAB><TAB><TAB>tweet_iter = stream.statuses.filter(**query_args) <TAB><TAB>else: <TAB><TAB><TAB>tweet_iter = stream.statuses.sample() <TAB># Iterate over the sample stream. <TAB>for tweet in tweet_iter: <TAB><TAB># You must test that your tweet has text. It might be a delete <TAB><TAB># or data message. <TAB><TAB>if tweet is None: <TAB><TAB><TAB>printNicely("-- None --") <TAB><TAB>elif tweet is Timeout: <TAB><TAB><TAB>printNicely("-- Timeout --") <TAB><TAB>elif tweet is HeartbeatTimeout: <TAB><TAB><TAB>printNicely("-- Heartbeat Timeout --") <TAB><TAB>elif tweet is Hangup: <TAB><TAB><TAB>printNicely("-- Hangup --") <TAB><TAB>elif tweet.get("text"): <TAB><TAB><TAB>printNicely(tweet["text"]) <TAB><TAB>else: <MASK> "
"<TAB><TAB><TAB>printNicely("-- Some data: " + str(tweet))"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB># TODO save an index image from blobs_ref[0] <TAB>return tasks "
"<TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB># TODO save an index image from blobs_ref[0]"
"<TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for<MASK>"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for<MASK> <TAB>return tasks "
"conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>)"
"<TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id":<MASK>"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id":<MASK> <TAB>return tasks "
"subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>)"
"<TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels":<MASK>"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels":<MASK> <TAB>return tasks "
"conncomps[conncomp]} <TAB><TAB><TAB><TAB>)"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label}) <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB># TODO save an index image from blobs_ref[0] <TAB>return tasks "
"<TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label})"
"<TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis<MASK>"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis<MASK> <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB># TODO save an index image from blobs_ref[0] <TAB>return tasks "
"== "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label})"
"<TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <MASK>"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <MASK> <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB># TODO save an index image from blobs_ref[0] <TAB>return tasks "
"<TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label})"
"<TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <MASK>"
"def subanalyses(self, subject_id, data): <TAB>analyses = self.app_param.evaluation_units.split(",") <TAB>tasks = [] <TAB>for analysis in analyses: <TAB><TAB>if analysis in ["foreground", "label"]: <TAB><TAB><TAB>labels = list(range(self.app_param.num_classes)) <TAB><TAB><TAB>if analysis == "foreground": <TAB><TAB><TAB><TAB>labels.remove(0) <TAB><TAB><TAB>for label in labels: <MASK> <TAB><TAB>elif analysis in ["cc"]: <TAB><TAB><TAB>cc_seg, cc_ref = connected_components( <TAB><TAB><TAB><TAB>data["inferred"], data["label"], self.app_param.output_prob <TAB><TAB><TAB>) <TAB><TAB><TAB>cc_func = union_of_seg_for_each_ref_cc # TODO make into factory <TAB><TAB><TAB>conncomps = cc_func(cc_seg, cc_ref) <TAB><TAB><TAB>for conncomp in conncomps: <TAB><TAB><TAB><TAB>tasks.append( <TAB><TAB><TAB><TAB><TAB>{"subject_id": subject_id, "cc_labels": conncomps[conncomp]} <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB><TAB># TODO save an index image from blobs_ref[0] <TAB>return tasks "
"<TAB><TAB><TAB><TAB>tasks.append({"subject_id": subject_id, "label": label})"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called() <TAB>else: <TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result) <TAB>self.assertThat(result.exit_code, Equals(0)) "
"<TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called()"
"<TAB><TAB>result =<MASK>"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result =<MASK> <TAB>else: <TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result) <TAB>self.assertThat(result.exit_code, Equals(0)) "
"self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called()"
"<TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <MASK>"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <MASK> <TAB>else: <TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result) <TAB>self.assertThat(result.exit_code, Equals(0)) "
"<TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called()"
"<TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <MASK>"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <MASK> <TAB>else: <TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result) <TAB>self.assertThat(result.exit_code, Equals(0)) "
"<TAB><TAB>self.mock_get_provider_for.assert_not_called()"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called() <TAB>else: <TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result) <TAB>self.assertThat(result.exit_code, Equals(0)) "
"<TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result)"
"<MASK>"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called() <TAB>else: <MASK> <TAB>self.assertThat(result.exit_code, Equals(0)) "
"<TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result)"
"<TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <MASK>"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called() <TAB>else: <TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <MASK> <TAB>self.assertThat(result.exit_code, Equals(0)) "
"<TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result)"
"<TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <MASK>"
"def test_valid_clean_providers(self): <TAB>self.useFixture( <TAB><TAB>fixtures.EnvironmentVariable("SNAPCRAFT_BUILD_ENVIRONMENT", self.env) <TAB>) <TAB>if self.result == "host": <TAB><TAB>result = self.run_command( <TAB><TAB><TAB>[self.step, "--provider", self.arg, "--destructive-mode"] <TAB><TAB>) <TAB><TAB>self.mock_get_provider_for.assert_not_called() <TAB>else: <TAB><TAB>result = self.run_command([self.step, "--provider", self.arg]) <MASK> <TAB>self.assertThat(result.exit_code, Equals(0)) "
"<TAB><TAB>self.mock_get_provider_for.assert_called_once_with(self.result)"
"def watch_question(request, question_id): <TAB>"""Start watching a question for replies or solution.""" <TAB>question = get_object_or_404(Question, pk=question_id, is_spam=False) <TAB>form = WatchQuestionForm(request.user, request.POST) <TAB># Process the form <TAB>msg = None <TAB>if form.is_valid(): <TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except ActivationRequestFailed: <TAB><TAB><TAB>msg = _("Could not send a message to that email address.") <TAB># Respond to ajax request <TAB>if request.is_ajax(): <TAB><TAB>if form.is_valid(): <TAB><TAB><TAB>msg = msg or ( <TAB><TAB><TAB><TAB>_("You will be notified of updates by email.") <TAB><TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB><TAB>else _( <TAB><TAB><TAB><TAB><TAB>"You should receive an email shortly " <TAB><TAB><TAB><TAB><TAB>"to confirm your subscription." <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>return HttpResponse(json.dumps({"message": msg})) <TAB><TAB>if request.POST.get("from_vote"): <TAB><TAB><TAB>tmpl = "questions/includes/question_vote_thanks.html" <TAB><TAB>else: <TAB><TAB><TAB>tmpl = "questions/includes/email_subscribe.html" <TAB><TAB>html = render_to_string( <TAB><TAB><TAB>tmpl, context={"question": question, "watch_form": form}, request=request <TAB><TAB>) <TAB><TAB>return HttpResponse(json.dumps({"html": html})) <TAB>if msg: <TAB><TAB>messages.add_message(request, messages.ERROR, msg) <TAB>return HttpResponseRedirect(question.get_absolute_url()) "
"<TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except ActivationRequestFailed: <TAB><TAB><TAB>msg = _("Could not send a message to that email address.")"
"<TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <MASK>"
"def watch_question(request, question_id): <TAB>"""Start watching a question for replies or solution.""" <TAB>question = get_object_or_404(Question, pk=question_id, is_spam=False) <TAB>form = WatchQuestionForm(request.user, request.POST) <TAB># Process the form <TAB>msg = None <TAB>if form.is_valid(): <TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <MASK> <TAB># Respond to ajax request <TAB>if request.is_ajax(): <TAB><TAB>if form.is_valid(): <TAB><TAB><TAB>msg = msg or ( <TAB><TAB><TAB><TAB>_("You will be notified of updates by email.") <TAB><TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB><TAB>else _( <TAB><TAB><TAB><TAB><TAB>"You should receive an email shortly " <TAB><TAB><TAB><TAB><TAB>"to confirm your subscription." <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>return HttpResponse(json.dumps({"message": msg})) <TAB><TAB>if request.POST.get("from_vote"): <TAB><TAB><TAB>tmpl = "questions/includes/question_vote_thanks.html" <TAB><TAB>else: <TAB><TAB><TAB>tmpl = "questions/includes/email_subscribe.html" <TAB><TAB>html = render_to_string( <TAB><TAB><TAB>tmpl, context={"question": question, "watch_form": form}, request=request <TAB><TAB>) <TAB><TAB>return HttpResponse(json.dumps({"html": html})) <TAB>if msg: <TAB><TAB>messages.add_message(request, messages.ERROR, msg) <TAB>return HttpResponseRedirect(question.get_absolute_url()) "
"<TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except ActivationRequestFailed: <TAB><TAB><TAB>msg = _("Could not send a message to that email address.")"
"<TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except<MASK>"
"def watch_question(request, question_id): <TAB>"""Start watching a question for replies or solution.""" <TAB>question = get_object_or_404(Question, pk=question_id, is_spam=False) <TAB>form = WatchQuestionForm(request.user, request.POST) <TAB># Process the form <TAB>msg = None <TAB>if form.is_valid(): <TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except<MASK> <TAB># Respond to ajax request <TAB>if request.is_ajax(): <TAB><TAB>if form.is_valid(): <TAB><TAB><TAB>msg = msg or ( <TAB><TAB><TAB><TAB>_("You will be notified of updates by email.") <TAB><TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB><TAB>else _( <TAB><TAB><TAB><TAB><TAB>"You should receive an email shortly " <TAB><TAB><TAB><TAB><TAB>"to confirm your subscription." <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>return HttpResponse(json.dumps({"message": msg})) <TAB><TAB>if request.POST.get("from_vote"): <TAB><TAB><TAB>tmpl = "questions/includes/question_vote_thanks.html" <TAB><TAB>else: <TAB><TAB><TAB>tmpl = "questions/includes/email_subscribe.html" <TAB><TAB>html = render_to_string( <TAB><TAB><TAB>tmpl, context={"question": question, "watch_form": form}, request=request <TAB><TAB>) <TAB><TAB>return HttpResponse(json.dumps({"html": html})) <TAB>if msg: <TAB><TAB>messages.add_message(request, messages.ERROR, msg) <TAB>return HttpResponseRedirect(question.get_absolute_url()) "
"ActivationRequestFailed: <TAB><TAB><TAB>msg = _("Could not send a message to that email address.")"
"<TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except ActivationRequestFailed: <TAB><TAB><TAB>msg<MASK>"
"def watch_question(request, question_id): <TAB>"""Start watching a question for replies or solution.""" <TAB>question = get_object_or_404(Question, pk=question_id, is_spam=False) <TAB>form = WatchQuestionForm(request.user, request.POST) <TAB># Process the form <TAB>msg = None <TAB>if form.is_valid(): <TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except ActivationRequestFailed: <TAB><TAB><TAB>msg<MASK> <TAB># Respond to ajax request <TAB>if request.is_ajax(): <TAB><TAB>if form.is_valid(): <TAB><TAB><TAB>msg = msg or ( <TAB><TAB><TAB><TAB>_("You will be notified of updates by email.") <TAB><TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB><TAB>else _( <TAB><TAB><TAB><TAB><TAB>"You should receive an email shortly " <TAB><TAB><TAB><TAB><TAB>"to confirm your subscription." <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>return HttpResponse(json.dumps({"message": msg})) <TAB><TAB>if request.POST.get("from_vote"): <TAB><TAB><TAB>tmpl = "questions/includes/question_vote_thanks.html" <TAB><TAB>else: <TAB><TAB><TAB>tmpl = "questions/includes/email_subscribe.html" <TAB><TAB>html = render_to_string( <TAB><TAB><TAB>tmpl, context={"question": question, "watch_form": form}, request=request <TAB><TAB>) <TAB><TAB>return HttpResponse(json.dumps({"html": html})) <TAB>if msg: <TAB><TAB>messages.add_message(request, messages.ERROR, msg) <TAB>return HttpResponseRedirect(question.get_absolute_url()) "
"= _("Could not send a message to that email address.")"
"def watch_question(request, question_id): <TAB>"""Start watching a question for replies or solution.""" <TAB>question = get_object_or_404(Question, pk=question_id, is_spam=False) <TAB>form = WatchQuestionForm(request.user, request.POST) <TAB># Process the form <TAB>msg = None <TAB>if form.is_valid(): <TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new") <TAB><TAB>except ActivationRequestFailed: <TAB><TAB><TAB>msg = _("Could not send a message to that email address.") <TAB># Respond to ajax request <TAB>if request.is_ajax(): <TAB><TAB>if form.is_valid(): <TAB><TAB><TAB>msg = msg or ( <TAB><TAB><TAB><TAB>_("You will be notified of updates by email.") <TAB><TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB><TAB>else _( <TAB><TAB><TAB><TAB><TAB>"You should receive an email shortly " <TAB><TAB><TAB><TAB><TAB>"to confirm your subscription." <TAB><TAB><TAB><TAB>) <TAB><TAB><TAB>) <TAB><TAB><TAB>return HttpResponse(json.dumps({"message": msg})) <TAB><TAB>if request.POST.get("from_vote"): <TAB><TAB><TAB>tmpl = "questions/includes/question_vote_thanks.html" <TAB><TAB>else: <TAB><TAB><TAB>tmpl = "questions/includes/email_subscribe.html" <TAB><TAB>html = render_to_string( <TAB><TAB><TAB>tmpl, context={"question": question, "watch_form": form}, request=request <TAB><TAB>) <TAB><TAB>return HttpResponse(json.dumps({"html": html})) <TAB>if msg: <TAB><TAB>messages.add_message(request, messages.ERROR, msg) <TAB>return HttpResponseRedirect(question.get_absolute_url()) "
"<TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <TAB><TAB><TAB><TAB>QuestionReplyEvent.notify(user_or_email, question) <TAB><TAB><TAB>else: <TAB><TAB><TAB><TAB>QuestionSolvedEvent.notify(user_or_email, question) <TAB><TAB><TAB>statsd.incr("questions.watches.new")"
"<TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <MASK>"
"def watch_question(request, question_id): <TAB>"""Start watching a question for replies or solution.""" <TAB>question = get_object_or_404(Question, pk=question_id, is_spam=False) <TAB>form = WatchQuestionForm(request.user, request.POST) <TAB># Process the form <TAB>msg = None <TAB>if form.is_valid(): <TAB><TAB>user_or_email = ( <TAB><TAB><TAB>request.user <TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB>else form.cleaned_data["email"] <TAB><TAB>) <TAB><TAB>try: <TAB><TAB><TAB>if form.cleaned_data["event_type"] == "reply": <MASK> <TAB><TAB>except ActivationRequestFailed: <TAB><TAB><TAB>msg = _("Could not send a message to that email address.") <TAB># Respond to ajax request <TAB>if request.is_ajax(): <TAB><TAB>if form.is_valid(): <TAB><TAB><TAB>msg = msg or ( <TAB><TAB><TAB><TAB>_("You will be notified of updates by email.") <TAB><TAB><TAB><TAB>if request.user.is_authenticated() <TAB><TAB><TAB><TAB>else _( <TAB><TAB><TAB><TAB><TAB>"Y