docstring: string (lengths 52-499)
function: string (lengths 67-35.2k)
__index_level_0__: int64 (values 52.6k-1.16M)
Mark additional columns as being part of the superkey. Supplements the Keys already extracted from the FSM template. Useful when adding new columns to existing tables. Note: This will impact attempts to further 'extend' the table as the superkey must be common between tables for successful extension. Args: key_list: list of header entries to be included in the superkey. Raises: KeyError: If any entry in list is not a valid header entry.
def AddKeys(self, key_list): for keyname in key_list: if keyname not in self.header: raise KeyError("'%s'" % keyname) self._keys = self._keys.union(set(key_list))
86,093
Support for [] notation. Args: column: Tuple of column names, or a (str) column name, or positional column number, 0-indexed. Returns: A list or string with column value(s). Raises: IndexError: The given column(s) were not found.
def __getitem__(self, column):
    if isinstance(column, (list, tuple)):
        ret = []
        for col in column:
            ret.append(self[col])
        return ret
    try:
        return self._values[self._index[column]]
    except (KeyError, TypeError, ValueError):
        pass
    # Perhaps we have a range like '1', ':-1' or '1:'.
    try:
        return self._values[column]
    except (IndexError, TypeError):
        pass
    raise IndexError('No such column "%s" in row.' % column)
86,250
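A minimal standalone sketch of the dual lookup behaviour described above (column name first, then positional or slice access), using a plain dict and list instead of the Row class itself; the names header, values, and lookup are illustrative only.

header = ['Name', 'Age']
values = ['alice', '30']
index = {name: i for i, name in enumerate(header)}

def lookup(column):
    if isinstance(column, (list, tuple)):
        return [lookup(col) for col in column]
    try:
        return values[index[column]]   # by column name
    except (KeyError, TypeError):
        pass
    return values[column]              # by position or slice

print(lookup('Name'), lookup(['Name', 'Age']), lookup(0), lookup(slice(0, 1)))
# alice ['alice', '30'] alice ['alice']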
Get an item from the Row by column name. Args: column: Tuple of column names, or a (str) column name, or positional column number, 0-indexed. default_value: The value to use if the key is not found. Returns: A list or string with column value(s) or default_value if not found.
def get(self, column, default_value=None):
    if isinstance(column, (list, tuple)):
        ret = []
        for col in column:
            ret.append(self.get(col, default_value))
        return ret
    # Perhaps we have a range like '1', ':-1' or '1:'.
    try:
        return self._values[column]
    except (IndexError, TypeError):
        pass
    try:
        return self[column]
    except IndexError:
        return default_value
86,252
Fetches the column number (0-indexed). Args: column: A string, the column to fetch the index of. Returns: An int, the column index number. Raises: ValueError: The specified column was not found.
def index(self, column):  # pylint: disable=C6409
    for i, key in enumerate(self._keys):
        if key == column:
            return i
    raise ValueError('Column "%s" not found.' % column)
86,253
Inserts new values at a specified offset. Args: key: string for header value. value: string for a data value. row_index: Offset into row for data. Raises: IndexError: If the offset is out of bounds.
def Insert(self, key, value, row_index): if row_index < 0: row_index += len(self) if not 0 <= row_index < len(self): raise IndexError('Index "%s" is out of bounds.' % row_index) new_row = Row() for idx in self.header: if self.index(idx) == row_index: new_row[key] = value new_row[idx] = self[idx] self._keys = new_row.header self._values = new_row.values del new_row self._BuildIndex()
86,255
Applies the function to every row in the table. Args: function: A function applied to each row. Returns: A new TextTable(). Raises: TableError: When the function returns an invalid row entry. The returned value must be compatible with Append().
def Map(self, function):
    new_table = self.__class__()
    # pylint: disable=protected-access
    new_table._table = [self.header]
    for row in self:
        filtered_row = function(row)
        if filtered_row:
            new_table.Append(filtered_row)
    return new_table
86,259
Sorts rows in the texttable. Args: cmp: func, a custom comparison function to use instead of the default ordering (converted via cmp_to_key). key: func, applied to each element before sorting. reverse: bool, reverse the order of the sort.
def sort(self, cmp=None, key=None, reverse=False): def _DefaultKey(value): result = [] for key in self.header: # Try sorting as numerical value if possible. try: result.append(float(value[key])) except ValueError: result.append(value[key]) return result key = key or _DefaultKey # Exclude header by copying table. new_table = self._table[1:] if cmp is not None: key = cmp_to_key(cmp) new_table.sort(key=key, reverse=reverse) # Regenerate the table with original header self._table = [self.header] self._table.extend(new_table) # Re-write the 'row' attribute of each row for index, row in enumerate(self._table): row.row = index
86,260
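Since Python 3 removed the cmp argument from list.sort(), the method above converts a legacy comparator with functools.cmp_to_key. A small self-contained illustration of that conversion; the data and comparator here are made up.

from functools import cmp_to_key

rows = [{'name': 'eth2'}, {'name': 'eth10'}, {'name': 'eth1'}]

def by_name(a, b):
    # Old-style comparator: negative / zero / positive.
    return (a['name'] > b['name']) - (a['name'] < b['name'])

rows.sort(key=cmp_to_key(by_name), reverse=False)
print([r['name'] for r in rows])  # ['eth1', 'eth10', 'eth2']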
Extends all rows in the texttable. The rows are extended with the new columns from the table. Args: table: A texttable, the table to extend this table by. keys: A set, the set of columns to use as the key. If None, the row index is used. Raises: IndexError: If key is not a valid column name.
def extend(self, table, keys=None): if keys: for k in keys: if k not in self._Header(): raise IndexError("Unknown key: '%s'", k) extend_with = [] for column in table.header: if column not in self.header: extend_with.append(column) if not extend_with: return for column in extend_with: self.AddColumn(column) if not keys: for row1, row2 in zip(self, table): for column in extend_with: row1[column] = row2[column] return for row1 in self: for row2 in table: for k in keys: if row1[k] != row2[k]: break else: for column in extend_with: row1[column] = row2[column] break
86,261
Removes a row from the table. Args: row: int, the row number to delete. Must be >= 1, as the header cannot be removed. Raises: TableError: Attempt to remove nonexistent or header row.
def Remove(self, row):
    if row == 0 or row > self.size:
        raise TableError("Attempt to remove header row")
    new_table = []
    # pylint: disable=E1103
    for t_row in self._table:
        if t_row.row != row:
            new_table.append(t_row)
            if t_row.row > row:
                t_row.row -= 1
    self._table = new_table
86,262
Sets the current row to new list. Args: new_values: List|dict of new values to insert into row. row: int, Row to insert values into. Raises: TableError: If number of new values is not equal to row size.
def _SetRow(self, new_values, row=0): if not row: row = self._row_index if row > self.size: raise TableError("Entry %s beyond table size %s." % (row, self.size)) self._table[row].values = new_values
86,264
Sets header of table to the given tuple. Args: new_values: Tuple of new header values.
def _SetHeader(self, new_values): row = self.row_class() row.row = 0 for v in new_values: row[v] = v self._table[0] = row
86,265
Finds the largest indivisible word of a string. ...and thus the smallest possible column width that can contain that word unsplit over rows. Args: text: A string of text potentially consisting of words. Returns: Integer size of the largest single word in the text.
def _SmallestColSize(self, text): if not text: return 0 stripped = terminal.StripAnsiText(text) return max(len(word) for word in stripped.split())
86,266
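For reference, the same "largest indivisible word" rule without the terminal.StripAnsiText() step, as a tiny standalone helper; this is purely illustrative.

def smallest_col_size(text):
    # Width of the widest single word, i.e. the narrowest column that
    # can hold the text without splitting a word across rows.
    if not text:
        return 0
    return max(len(word) for word in text.split())

print(smallest_col_size('GigabitEthernet0/1 is administratively down'))  # 18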
Retrieves the first non-header row whose given column matches the given value. Args: column: str, the name of the column to check. value: str, the value of the column to check. Returns: A Row() for the first matching row, None otherwise. Raises: IndexError: The specified column does not exist.
def RowWith(self, column, value): for row in self._table[1:]: if row[column] == value: return row return None
86,267
Appends a new column to the table. Args: column: A string, name of the column to add. default: Default value for entries. Defaults to ''. col_index: Integer index for where to insert new column. Raises: TableError: Column name already exists.
def AddColumn(self, column, default="", col_index=-1): if column in self.table: raise TableError("Column %r already in table." % column) if col_index == -1: self._table[0][column] = column for i in range(1, len(self._table)): self._table[i][column] = default else: self._table[0].Insert(column, column, col_index) for i in range(1, len(self._table)): self._table[i].Insert(column, default, col_index)
86,268
Adds a new row (list) to the table. Args: new_values: Tuple, dict, or Row() of new values to append as a row. Raises: TableError: Supplied tuple not equal to table width.
def Append(self, new_values): newrow = self.NewRow() newrow.values = new_values self._table.append(newrow)
86,269
Fetches a new, empty row, with headers populated. Args: value: Initial value to set each row entry to. Returns: A Row() object.
def NewRow(self, value=""): newrow = self.row_class() newrow.row = self.size + 1 newrow.table = self headers = self._Header() for header in headers: newrow[header] = value return newrow
86,270
Parses buffer into tabular format. Strips off comments (preceded by '#'). Optionally parses and indexes by the first line (header). Args: buf: String file buffer containing CSV data. header: bool, whether the first line of the buffer is a header. separator: String that the CSV is separated by. Returns: int, the size of the table created. Raises: TableError: A parsing error occurred.
def CsvToTable(self, buf, header=True, separator=","): self.Reset() header_row = self.row_class() if header: line = buf.readline() header_str = "" while not header_str: # Remove comments. header_str = line.split("#")[0].strip() if not header_str: line = buf.readline() header_list = header_str.split(separator) header_length = len(header_list) for entry in header_list: entry = entry.strip() if entry in header_row: raise TableError("Duplicate header entry %r." % entry) header_row[entry] = entry header_row.row = 0 self._table[0] = header_row # xreadlines would be better but not supported by StringIO for testing. for line in buf: # Support commented lines, provide '#' is first character of line. if line.startswith("#"): continue lst = line.split(separator) lst = [l.strip() for l in lst] if header and len(lst) != header_length: # Silently drop illegal line entries continue if not header: header_row = self.row_class() header_length = len(lst) header_row.values = dict( zip(range(header_length), range(header_length)) ) self._table[0] = header_row header = True continue new_row = self.NewRow() new_row.values = lst header_row.row = self.size + 1 self._table.append(new_row) return self.size
86,271
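A standalone sketch of the same parsing rules (comments stripped, the first non-comment line taken as the header, rows with the wrong field count silently dropped), using only the standard library; the sample buffer is made up.

import io

buf = io.StringIO("# inventory\nname,ip\nr1,10.0.0.1\n# comment\nr2,10.0.0.2\nbadline\n")
header = None
rows = []
for line in buf:
    line = line.split('#')[0].strip()   # drop comments and blank lines
    if not line:
        continue
    fields = [f.strip() for f in line.split(',')]
    if header is None:
        header = fields
    elif len(fields) == len(header):
        rows.append(dict(zip(header, fields)))

print(rows)
# [{'name': 'r1', 'ip': '10.0.0.1'}, {'name': 'r2', 'ip': '10.0.0.2'}]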
Returns windows interfaces through GetAdaptersAddresses. params: - extended: include anycast and multicast IPv6 (default False)
def get_windows_if_list(extended=False): # Should work on Windows XP+ def _get_mac(x): size = x["physical_address_length"] if size != 6: return "" data = bytearray(x["physical_address"]) return str2mac(bytes(data)[:size]) def _get_ips(x): unicast = x['first_unicast_address'] anycast = x['first_anycast_address'] multicast = x['first_multicast_address'] def _resolve_ips(y): if not isinstance(y, list): return [] ips = [] for ip in y: addr = ip['address']['address'].contents if addr.si_family == socket.AF_INET6: ip_key = "Ipv6" si_key = "sin6_addr" else: ip_key = "Ipv4" si_key = "sin_addr" data = getattr(addr, ip_key) data = getattr(data, si_key) data = bytes(bytearray(data.byte)) # Build IP if data: ips.append(inet_ntop(addr.si_family, data)) return ips ips = [] ips.extend(_resolve_ips(unicast)) if extended: ips.extend(_resolve_ips(anycast)) ips.extend(_resolve_ips(multicast)) return ips if six.PY2: _str_decode = lambda x: x.encode('utf8', errors='ignore') else: _str_decode = plain_str return [ { "name": _str_decode(x["friendly_name"]), "win_index": x["interface_index"], "description": _str_decode(x["description"]), "guid": _str_decode(x["adapter_name"]), "mac": _get_mac(x), "ipv4_metric": 0 if WINDOWS_XP else x["ipv4_metric"], "ipv6_metric": 0 if WINDOWS_XP else x["ipv6_metric"], "ips": _get_ips(x) } for x in GetAdaptersAddresses() ]
86,465
This function extracts the source/destination address of a 6LoWPAN packet from its underlying Dot15d4Data (802.15.4 data) layer. params: - source: if True, the source address is extracted. Otherwise, the destination address is. returns: the packed & processed address
def _extract_dot15d4address(pkt, source=True):
    underlayer = pkt.underlayer
    while underlayer is not None and not isinstance(underlayer, Dot15d4Data):
        underlayer = underlayer.underlayer
    if type(underlayer) == Dot15d4Data:
        addr = underlayer.src_addr if source else underlayer.dest_addr
        if underlayer.underlayer.fcf_destaddrmode == 3:
            tmp_ip = LINK_LOCAL_PREFIX[0:8] + struct.pack(">Q", addr)
            # Toggle the universal/local bit of the first address byte.
            tmp_ip = tmp_ip[0:8] + struct.pack("B", (orb(tmp_ip[8]) ^ 0x2)) + tmp_ip[9:16]
        elif underlayer.underlayer.fcf_destaddrmode == 2:
            tmp_ip = LINK_LOCAL_PREFIX[0:8] + \
                b"\x00\x00\x00\xff\xfe\x00" + \
                struct.pack(">Q", addr)[6:]
        return tmp_ip
    else:
        # Usually the IEEE 802.15.4 data layer is needed to extract this address.
        raise Exception("Unimplemented: the IP header is contained in an IEEE 802.15.4 "
                        "frame; in this case the address is not available.")
87,344
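An illustrative, scapy-free sketch of the long-address branch above: pack the 64-bit 802.15.4 address, toggle the universal/local bit of its first byte, and prepend the fe80::/64 link-local prefix. The address value below is made up.

import struct
from socket import AF_INET6, inet_ntop

LINK_LOCAL_PREFIX = b"\xfe\x80" + b"\x00" * 14

addr = 0x1234567890ABCDEF              # hypothetical 64-bit node address
iid = bytearray(struct.pack(">Q", addr))
iid[0] ^= 0x02                          # toggle the U/L bit
print(inet_ntop(AF_INET6, LINK_LOCAL_PREFIX[:8] + bytes(iid)))
# fe80::1034:5678:90ab:cdef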
List available layers, or infos on a given layer class or name. params: - obj: Packet / packet name to use - case_sensitive: if obj is a string, is it case sensitive? - verbose
def ls(obj=None, case_sensitive=False, verbose=False): is_string = isinstance(obj, six.string_types) if obj is None or is_string: tip = False if obj is None: tip = True all_layers = sorted(conf.layers, key=lambda x: x.__name__) else: pattern = re.compile(obj, 0 if case_sensitive else re.I) # We first order by accuracy, then length if case_sensitive: sorter = lambda x: (x.__name__.index(obj), len(x.__name__)) else: obj = obj.lower() sorter = lambda x: (x.__name__.lower().index(obj), len(x.__name__)) all_layers = sorted((layer for layer in conf.layers if (isinstance(layer.__name__, str) and pattern.search(layer.__name__)) or (isinstance(layer.name, str) and pattern.search(layer.name))), key=sorter) for layer in all_layers: print("%-10s : %s" % (layer.__name__, layer._name)) if tip and conf.interactive: print("\nTIP: You may use explore() to navigate through all " "layers using a clear GUI") else: is_pkt = isinstance(obj, Packet) if issubtype(obj, Packet) or is_pkt: for f in obj.fields_desc: cur_fld = f attrs = [] long_attrs = [] while isinstance(cur_fld, (Emph, ConditionalField)): if isinstance(cur_fld, ConditionalField): attrs.append(cur_fld.__class__.__name__[:4]) cur_fld = cur_fld.fld if verbose and isinstance(cur_fld, EnumField) \ and hasattr(cur_fld, "i2s"): if len(cur_fld.i2s) < 50: long_attrs.extend( "%s: %d" % (strval, numval) for numval, strval in sorted(six.iteritems(cur_fld.i2s)) ) elif isinstance(cur_fld, MultiEnumField): fld_depend = cur_fld.depends_on(obj.__class__ if is_pkt else obj) attrs.append("Depends on %s" % fld_depend.name) if verbose: cur_i2s = cur_fld.i2s_multi.get( cur_fld.depends_on(obj if is_pkt else obj()), {} ) if len(cur_i2s) < 50: long_attrs.extend( "%s: %d" % (strval, numval) for numval, strval in sorted(six.iteritems(cur_i2s)) ) elif verbose and isinstance(cur_fld, FlagsField): names = cur_fld.names long_attrs.append(", ".join(names)) class_name = "%s (%s)" % ( cur_fld.__class__.__name__, ", ".join(attrs)) if attrs else cur_fld.__class__.__name__ if isinstance(cur_fld, BitField): class_name += " (%d bit%s)" % (cur_fld.size, "s" if cur_fld.size > 1 else "") print("%-10s : %-35s =" % (f.name, class_name), end=' ') if is_pkt: print("%-15r" % (getattr(obj, f.name),), end=' ') print("(%r)" % (f.default,)) for attr in long_attrs: print("%-15s%s" % ("", attr)) if is_pkt and not isinstance(obj.payload, NoPayload): print("--") ls(obj.payload) else: print("Not a packet class or name. Type 'ls()' to list packet classes.")
87,400
Find all MACs registered to an OUI params: - name: the OUI name - case_sensitive: defaults to False returns: a dict of mac: (Name, Extended Name) tuples
def reverse_lookup(self, name, case_sensitive=False): if case_sensitive: filtr = lambda x, l: any(x == z for z in l) else: name = name.lower() filtr = lambda x, l: any(x == z.lower() for z in l) return {k: v for k, v in six.iteritems(self.__dict__) if filtr(name, v)}
87,573
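The lookup boils down to a case-insensitive dictionary filter; a standalone equivalent over a made-up registry is shown here.

registry = {'00:00:0c': ('Cisco', 'Cisco Systems, Inc'),
            '00:1b:63': ('Apple', 'Apple, Inc.')}

name = 'cisco'
matches = {mac: names for mac, names in registry.items()
           if any(name == entry.lower() for entry in names)}
print(matches)  # {'00:00:0c': ('Cisco', 'Cisco Systems, Inc')}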
Process all NetflowV9/10 Packets to match IDs of the DataFlowsets with the Headers params: - plist: the list of mixed NetflowV9/10 packets. - verb: verbose print (0/1)
def netflowv9_defragment(plist, verb=1): if not isinstance(plist, (PacketList, list)): plist = [plist] # We need the whole packet to be dissected to access field def in # NetflowFlowsetV9 or NetflowOptionsFlowsetV9/10 definitions = {} definitions_opts = {} ignored = set() # Iterate through initial list for pkt in plist: _netflowv9_defragment_packet(pkt, definitions, definitions_opts, ignored) if conf.verb >= 1 and ignored: warning("Ignored templateIDs (missing): %s" % list(ignored)) return plist
87,645
Return the interface mode. params: - iface: the iwconfig interface
def get_iface_mode(iface): p = subprocess.Popen(["iwconfig", iface], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, err = p.communicate() match = re.search(br"mode:([a-zA-Z]*)", output.lower()) if match: return plain_str(match.group(1)) return "unknown"
87,914
Sets the monitor mode (or remove it) from an interface. params: - iface: the iwconfig interface - monitor: True if the interface should be set in monitor mode, False if it should be in managed mode
def set_iface_monitor(iface, monitor): mode = get_iface_mode(iface) if mode == "unknown": warning("Could not parse iwconfig !") current_monitor = mode == "monitor" if monitor == current_monitor: # Already correct return True s_mode = "monitor" if monitor else "managed" def _check_call(commands): p = subprocess.Popen(commands, stderr=subprocess.PIPE, stdout=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: warning("%s failed !" % " ".join(commands)) return False return True try: assert _check_call(["ifconfig", iface, "down"]) assert _check_call(["iwconfig", iface, "mode", s_mode]) assert _check_call(["ifconfig", iface, "up"]) return True except AssertionError: return False
87,915
Checks that a module's version is at least minver. params: - module: the module to test - minver: a tuple of version components
def _version_checker(module, minver):
    # We could use LooseVersion, but distutils imports imp which is deprecated
    version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
    version_tags = re.match(version_regexp, module.__version__)
    if not version_tags:
        return False
    version_tags = version_tags.group(1).split(".")
    version_tags = tuple(int(x) for x in version_tags)
    return version_tags >= minver
88,087
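The regex-based parsing can be exercised on its own; here a few made-up version strings are run through the same pattern and tuple comparison.

import re

version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
minver = (1, 0)
for raw in ('2.4.3', 'v1.7.0.dev42', '0.23.1'):
    tags = tuple(int(x) for x in re.match(version_regexp, raw).group(1).split('.'))
    print(raw, tags, tags >= minver)
# 2.4.3 (2, 4, 3) True
# v1.7.0.dev42 (1, 7, 0) True
# 0.23.1 (0, 23, 1) False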
Show the list of all existing contribs. Params: - name: filter to search the contribs - ret: whether the function should return a dict instead of printing it
def list_contrib(name=None, ret=False, _debug=False): # _debug: checks that all contrib modules have correctly defined: # # scapy.contrib.description = [...] # # scapy.contrib.status = [...] # # scapy.contrib.name = [...] (optional) # or set the flag: # # scapy.contrib.description = skip # to skip the file if name is None: name = "*.py" elif "*" not in name and "?" not in name and not name.endswith(".py"): name += ".py" results = [] dir_path = os.path.join(os.path.dirname(__file__), "contrib") if sys.version_info >= (3, 5): name = os.path.join(dir_path, "**", name) iterator = glob.iglob(name, recursive=True) else: name = os.path.join(dir_path, name) iterator = glob.iglob(name) for f in iterator: mod = f.replace(os.path.sep, ".").partition("contrib.")[2] if mod.startswith("__"): continue if mod.endswith(".py"): mod = mod[:-3] desc = {"description": None, "status": None, "name": mod} for l in io.open(f, errors="replace"): if l[0] != "#": continue p = l.find("scapy.contrib.") if p >= 0: p += 14 q = l.find("=", p) key = l[p:q].strip() value = l[q + 1:].strip() desc[key] = value if desc["status"] == "skip": break if desc["description"] and desc["status"]: results.append(desc) break if _debug: if desc["status"] == "skip": pass elif not desc["description"] or not desc["status"]: raise Scapy_Exception("Module %s is missing its " "contrib infos !" % mod) results.sort(key=lambda x: x["name"]) if ret: return results else: for desc in results: print("%(name)-20s: %(description)-40s status=%(status)s" % desc)
88,228
Save current Scapy session to the file specified in the fname arg. params: - fname: file to save the scapy session in - session: scapy session to use. If None, the console one will be used - pickleProto: pickle proto version (default: -1 = latest)
def save_session(fname=None, session=None, pickleProto=-1): from scapy import utils if fname is None: fname = conf.session if not fname: conf.session = fname = utils.get_temp_file(keep=True) log_interactive.info("Use [%s] as session file" % fname) if session is None: try: session = get_ipython().user_ns except Exception: session = six.moves.builtins.__dict__["scapy_session"] to_be_saved = session.copy() if "__builtins__" in to_be_saved: del(to_be_saved["__builtins__"]) for k in list(to_be_saved): i = to_be_saved[k] if hasattr(i, "__module__") and (k[0] == "_" or i.__module__.startswith("IPython")): # noqa: E501 del(to_be_saved[k]) if isinstance(i, ConfClass): del(to_be_saved[k]) elif isinstance(i, (type, type, types.ModuleType)): if k[0] != "_": log_interactive.error("[%s] (%s) can't be saved.", k, type(to_be_saved[k])) # noqa: E501 del(to_be_saved[k]) try: os.rename(fname, fname + ".bak") except OSError: pass f = gzip.open(fname, "wb") six.moves.cPickle.dump(to_be_saved, f, pickleProto) f.close() del f
88,229
Load current Scapy session from the file specified in the fname arg. This will erase any existing session. params: - fname: file to load the scapy session from
def load_session(fname=None): if fname is None: fname = conf.session try: s = six.moves.cPickle.load(gzip.open(fname, "rb")) except IOError: try: s = six.moves.cPickle.load(open(fname, "rb")) except IOError: # Raise "No such file exception" raise scapy_session = six.moves.builtins.__dict__["scapy_session"] scapy_session.clear() scapy_session.update(s) update_ipython_session(scapy_session) log_loading.info("Loaded session [%s]" % fname)
88,230
Update current Scapy session from the file specified in the fname arg. params: - fname: file to load the scapy session from
def update_session(fname=None): if fname is None: fname = conf.session try: s = six.moves.cPickle.load(gzip.open(fname, "rb")) except IOError: s = six.moves.cPickle.load(open(fname, "rb")) scapy_session = six.moves.builtins.__dict__["scapy_session"] scapy_session.update(s) update_ipython_session(scapy_session)
88,231
Create an interactive session and execute the commands passed as "cmds" and return all output params: - cmds: a list of commands to run returns: (output, returned) The output contains both sys.stdout and sys.stderr logs
def autorun_get_interactive_session(cmds, **kargs): sstdout, sstderr = sys.stdout, sys.stderr sw = StringWriter() try: try: sys.stdout = sys.stderr = sw res = autorun_commands(cmds, **kargs) except StopAutorun as e: e.code_run = sw.s raise finally: sys.stdout, sys.stderr = sstdout, sstderr return sw.s, res
88,237
This function is called during the sendrecv() routine to select the available sockets. params: - sockets: an array of sockets that need to be selected returns: - an array of sockets that were selected - the function to be called next to get the packets (e.g. recv)
def select(sockets, remain=conf.recv_poll_rate):
    try:
        inp, _, _ = select(sockets, [], [], remain)
    except (IOError, select_error) as exc:
        # select.error has no .errno attribute
        if exc.args[0] != errno.EINTR:
            raise
    return inp, None
88,431
Returns the IPv4 route to a host. params: - dst: the IPv4 address of the destination host returns: (iface, output_ip, gateway_ip) - iface: the interface used to connect to the host - output_ip: the outgoing IP that will be used - gateway_ip: the gateway IP that will be used
def route(self, dst=None, verbose=conf.verb): dst = dst or "0.0.0.0" # Enable route(None) to return default route if isinstance(dst, bytes): try: dst = plain_str(dst) except UnicodeDecodeError: raise TypeError("Unknown IP address input (bytes)") if dst in self.cache: return self.cache[dst] # Transform "192.168.*.1-5" to one IP of the set _dst = dst.split("/")[0].replace("*", "0") while True: idx = _dst.find("-") if idx < 0: break m = (_dst[idx:] + ".").find(".") _dst = _dst[:idx] + _dst[idx + m:] atol_dst = atol(_dst) paths = [] for d, m, gw, i, a, me in self.routes: if not a: # some interfaces may not currently be connected continue aa = atol(a) if aa == atol_dst: paths.append( (0xffffffff, 1, (scapy.consts.LOOPBACK_INTERFACE, a, "0.0.0.0")) # noqa: E501 ) if (atol_dst & m) == (d & m): paths.append((m, me, (i, a, gw))) if not paths: if verbose: warning("No route found (no default route?)") return scapy.consts.LOOPBACK_INTERFACE, "0.0.0.0", "0.0.0.0" # Choose the more specific route # Sort by greatest netmask and use metrics as a tie-breaker paths.sort(key=lambda x: (-x[0], x[1])) # Return interface ret = paths[0][2] self.cache[dst] = ret return ret
88,563
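The selection logic is a longest-prefix match with the metric as a tie-breaker. Below is a minimal sketch with hand-rolled routes already in integer form; the addresses and metrics are made up and atol() itself is not reproduced.

# routes as (network, netmask, gateway, iface, addr, metric), all made up
routes = [
    (0x0A000000, 0xFF000000, '0.0.0.0', 'eth0', '10.0.0.5', 10),   # 10.0.0.0/8
    (0x0A010000, 0xFFFF0000, '0.0.0.0', 'eth1', '10.1.0.5', 5),    # 10.1.0.0/16
]
dst = 0x0A010203                                                    # 10.1.2.3
paths = [(m, me, (i, a, gw)) for d, m, gw, i, a, me in routes if (dst & m) == (d & m)]
paths.sort(key=lambda x: (-x[0], x[1]))   # widest netmask wins, metric breaks ties
print(paths[0][2])                         # ('eth1', '10.1.0.5', '0.0.0.0')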
Initialize a ``TensorFlowPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
def __init__(self, endpoint_name, sagemaker_session=None): super(TensorFlowPredictor, self).__init__(endpoint_name, sagemaker_session, tf_json_serializer, tf_json_deserializer)
100,222
Stream the output of a process to stdout This function takes an existing process that will be polled for output. Only stdout will be polled and sent to sys.stdout. Args: process(subprocess.Popen): a process that has been started with stdout=PIPE and stderr=STDOUT Returns (int): process exit code
def _stream_output(process): exit_code = None while exit_code is None: stdout = process.stdout.readline().decode("utf-8") sys.stdout.write(stdout) exit_code = process.poll() if exit_code != 0: raise RuntimeError("Process exited with code: %s" % exit_code) return exit_code
100,236
Run a training job locally using docker-compose. Args: input_data_config (dict): The Input Data Configuration, this contains data such as the channels to be used for training. output_data_config (dict): The Output Data Configuration, including the S3OutputPath (or local path) where artifacts are written. hyperparameters (dict): The HyperParameters for the training job. job_name (str): Name of the local training job being run. Returns (str): Location of the trained model.
def train(self, input_data_config, output_data_config, hyperparameters, job_name): self.container_root = self._create_tmp_folder() os.mkdir(os.path.join(self.container_root, 'output')) # create output/data folder since sagemaker-containers 2.0 expects it os.mkdir(os.path.join(self.container_root, 'output', 'data')) # A shared directory for all the containers. It is only mounted if the training script is # Local. shared_dir = os.path.join(self.container_root, 'shared') os.mkdir(shared_dir) data_dir = self._create_tmp_folder() volumes = self._prepare_training_volumes(data_dir, input_data_config, output_data_config, hyperparameters) # If local, source directory needs to be updated to mounted /opt/ml/code path hyperparameters = self._update_local_src_path(hyperparameters, key=sagemaker.estimator.DIR_PARAM_NAME) # Create the configuration files for each container that we will create # Each container will map the additional local volumes (if any). for host in self.hosts: _create_config_file_directories(self.container_root, host) self.write_config_files(host, hyperparameters, input_data_config) shutil.copytree(data_dir, os.path.join(self.container_root, host, 'input', 'data')) training_env_vars = { REGION_ENV_NAME: self.sagemaker_session.boto_region_name, TRAINING_JOB_NAME_ENV_NAME: job_name, } compose_data = self._generate_compose_file('train', additional_volumes=volumes, additional_env_vars=training_env_vars) compose_command = self._compose() if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image): _pull_image(self.image) process = subprocess.Popen(compose_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) try: _stream_output(process) except RuntimeError as e: # _stream_output() doesn't have the command line. We will handle the exception # which contains the exit code and append the command line to it. msg = "Failed to run: %s, %s" % (compose_command, str(e)) raise RuntimeError(msg) finally: artifacts = self.retrieve_artifacts(compose_data, output_data_config, job_name) # free up the training data directory as it may contain # lots of data downloaded from S3. This doesn't delete any local # data that was just mounted to the container. dirs_to_delete = [data_dir, shared_dir] self._cleanup(dirs_to_delete) # Print our Job Complete line to have a similar experience to training on SageMaker where you # see this line at the end. print('===== Job Complete =====') return artifacts
100,246
Host a local endpoint using docker-compose. Args: model_dir (str): path or s3:// location of the model data to serve. environment (dict): a dictionary of environment variables to be passed to the hosting container.
def serve(self, model_dir, environment): logger.info("serving") self.container_root = self._create_tmp_folder() logger.info('creating hosting dir in {}'.format(self.container_root)) volumes = self._prepare_serving_volumes(model_dir) # If the user script was passed as a file:// mount it to the container. if sagemaker.estimator.DIR_PARAM_NAME.upper() in environment: script_dir = environment[sagemaker.estimator.DIR_PARAM_NAME.upper()] parsed_uri = urlparse(script_dir) if parsed_uri.scheme == 'file': volumes.append(_Volume(parsed_uri.path, '/opt/ml/code')) # Update path to mount location environment = environment.copy() environment[sagemaker.estimator.DIR_PARAM_NAME.upper()] = '/opt/ml/code' if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image): _pull_image(self.image) self._generate_compose_file('serve', additional_env_vars=environment, additional_volumes=volumes) compose_command = self._compose() self.container = _HostingContainer(compose_command) self.container.start()
100,247
Get the model artifacts from all the container nodes. Used after training completes to gather the data from all the individual containers. As with the official SageMaker training service, duplicate files are overridden if multiple containers have the same file names. Args: compose_data (dict): Docker-Compose configuration in dictionary format. output_data_config (dict): The Output Data Configuration for the job. job_name (str): Name of the local training job. Returns: Local path to the collected model artifacts.
def retrieve_artifacts(self, compose_data, output_data_config, job_name): # We need a directory to store the artfiacts from all the nodes # and another one to contained the compressed final artifacts artifacts = os.path.join(self.container_root, 'artifacts') compressed_artifacts = os.path.join(self.container_root, 'compressed_artifacts') os.mkdir(artifacts) model_artifacts = os.path.join(artifacts, 'model') output_artifacts = os.path.join(artifacts, 'output') artifact_dirs = [model_artifacts, output_artifacts, compressed_artifacts] for d in artifact_dirs: os.mkdir(d) # Gather the artifacts from all nodes into artifacts/model and artifacts/output for host in self.hosts: volumes = compose_data['services'][str(host)]['volumes'] for volume in volumes: host_dir, container_dir = volume.split(':') if container_dir == '/opt/ml/model': sagemaker.local.utils.recursive_copy(host_dir, model_artifacts) elif container_dir == '/opt/ml/output': sagemaker.local.utils.recursive_copy(host_dir, output_artifacts) # Tar Artifacts -> model.tar.gz and output.tar.gz model_files = [os.path.join(model_artifacts, name) for name in os.listdir(model_artifacts)] output_files = [os.path.join(output_artifacts, name) for name in os.listdir(output_artifacts)] sagemaker.utils.create_tar_file(model_files, os.path.join(compressed_artifacts, 'model.tar.gz')) sagemaker.utils.create_tar_file(output_files, os.path.join(compressed_artifacts, 'output.tar.gz')) if output_data_config['S3OutputPath'] == '': output_data = 'file://%s' % compressed_artifacts else: # Now we just need to move the compressed artifacts to wherever they are required output_data = sagemaker.local.utils.move_to_destination( compressed_artifacts, output_data_config['S3OutputPath'], job_name, self.sagemaker_session) _delete_tree(model_artifacts) _delete_tree(output_artifacts) return os.path.join(output_data, 'model.tar.gz')
100,249
Write the config files for the training containers. This method writes the hyperparameters, resources and input data configuration files. Args: host (str): Host to write the configuration for hyperparameters (dict): Hyperparameters for training. input_data_config (dict): Training input channels to be used for training. Returns: None
def write_config_files(self, host, hyperparameters, input_data_config): config_path = os.path.join(self.container_root, host, 'input', 'config') resource_config = { 'current_host': host, 'hosts': self.hosts } json_input_data_config = {} for c in input_data_config: channel_name = c['ChannelName'] json_input_data_config[channel_name] = { 'TrainingInputMode': 'File' } if 'ContentType' in c: json_input_data_config[channel_name]['ContentType'] = c['ContentType'] _write_json_file(os.path.join(config_path, 'hyperparameters.json'), hyperparameters) _write_json_file(os.path.join(config_path, 'resourceconfig.json'), resource_config) _write_json_file(os.path.join(config_path, 'inputdataconfig.json'), json_input_data_config)
100,250
Create a Volume instance. The container path can be provided as a container_dir or as a channel name, but not both. Args: host_dir (str): path to the volume data on the host. container_dir (str): path inside the container that host_dir will be mapped to. channel (str): channel name that the host_dir represents. It will be mapped as /opt/ml/input/data/<channel> in the container.
def __init__(self, host_dir, container_dir=None, channel=None): if not container_dir and not channel: raise ValueError('Either container_dir or channel must be declared.') if container_dir and channel: raise ValueError('container_dir and channel cannot be declared together.') self.container_dir = container_dir if container_dir else os.path.join('/opt/ml/input/data', channel) self.host_dir = host_dir if platform.system() == 'Darwin' and host_dir.startswith('/var'): self.host_dir = os.path.join('/private', host_dir) self.map = '{}:{}'.format(self.host_dir, self.container_dir)
100,262
Initialize a ``PyTorchPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
def __init__(self, endpoint_name, sagemaker_session=None): super(PyTorchPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
100,265
Set hyperparameters needed for training. Args: * records (:class:`~RecordSet`): The records to train this ``Estimator`` on. * mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a default value will be used. * job_name (str): Name of the training job to be created. If not specified, one is generated, using the base name given to the constructor if applicable.
def _prepare_for_training(self, records, mini_batch_size=None, job_name=None): num_records = None if isinstance(records, list): for record in records: if record.channel == 'train': num_records = record.num_records break if num_records is None: raise ValueError('Must provide train channel.') else: num_records = records.num_records # mini_batch_size is a required parameter default_mini_batch_size = min(self.DEFAULT_MINI_BATCH_SIZE, max(1, int(num_records / self.train_instance_count))) use_mini_batch_size = mini_batch_size or default_mini_batch_size super(PCA, self)._prepare_for_training(records=records, mini_batch_size=use_mini_batch_size, job_name=job_name)
100,268
Validate that the source directory exists and it contains the user script Args: script (str): Script filename. directory (str): Directory containing the source file. Raises: ValueError: If ``directory`` does not exist, is not a directory, or does not contain ``script``.
def validate_source_dir(script, directory): if directory: if not os.path.isfile(os.path.join(directory, script)): raise ValueError('No file named "{}" was found in directory "{}".'.format(script, directory)) return True
100,272
Extract the framework version from the image tag. Args: image_tag (str): Image tag, which should take the form '<framework_version>-<device>-<py_version>' Returns: str: The framework version.
def framework_version_from_tag(image_tag): tag_pattern = re.compile('^(.*)-(cpu|gpu)-(py2|py3)$') tag_match = tag_pattern.match(image_tag) return None if tag_match is None else tag_match.group(1)
100,276
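An example of the tag parsing above, exercising only the regex (no SageMaker dependency needed); the tags are made up.

import re

tag_pattern = re.compile('^(.*)-(cpu|gpu)-(py2|py3)$')
for tag in ('1.5.0-gpu-py2', 'latest'):
    m = tag_pattern.match(tag)
    print(tag, '->', None if m is None else m.group(1))
# 1.5.0-gpu-py2 -> 1.5.0
# latest -> None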
Returns an (s3 bucket, key name/prefix) tuple from a URL with an s3 scheme. Args: url (str): The S3 URL to parse. Returns: tuple: A tuple containing: str: S3 bucket name str: S3 key
def parse_s3_url(url): parsed_url = urlparse(url) if parsed_url.scheme != "s3": raise ValueError("Expecting 's3' scheme, got: {} in {}".format(parsed_url.scheme, url)) return parsed_url.netloc, parsed_url.path.lstrip('/')
100,277
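A usage sketch of the URL parsing rule above, using only the standard library; the URL is made up.

from urllib.parse import urlparse

url = 's3://example-bucket/some/key/prefix/data.csv'
parsed = urlparse(url)
assert parsed.scheme == 's3'
print(parsed.netloc, parsed.path.lstrip('/'))
# example-bucket some/key/prefix/data.csv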
Describe a local training job. Args: TrainingJobName (str): Training job name to describe. Returns: (dict) DescribeTrainingJob Response.
def describe_training_job(self, TrainingJobName): if TrainingJobName not in LocalSagemakerClient._training_jobs: error_response = {'Error': {'Code': 'ValidationException', 'Message': 'Could not find local training job'}} raise ClientError(error_response, 'describe_training_job') else: return LocalSagemakerClient._training_jobs[TrainingJobName].describe()
100,281
Create a Local Model Object Args: ModelName (str): the Model Name PrimaryContainer (dict): a SageMaker primary container definition
def create_model(self, ModelName, PrimaryContainer, *args, **kwargs):  # pylint: disable=unused-argument
    LocalSagemakerClient._models[ModelName] = _LocalModel(ModelName, PrimaryContainer)
100,284
Initializes a LocalSageMakerRuntimeClient Args: config (dict): Optional configuration for this client. In particular only the local port is read.
def __init__(self, config=None): self.http = urllib3.PoolManager() self.serving_port = 8080 self.config = config self.serving_port = get_config_value('local.serving_port', config) or 8080
100,291
Initialize a ``SKLearnPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
def __init__(self, endpoint_name, sagemaker_session=None): super(SKLearnPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
100,296
Initialize an AmazonAlgorithmEstimatorBase. Args: data_location (str or None): The s3 prefix to upload RecordSet objects to, expressed as an S3 url. For example "s3://example-bucket/some-key-prefix/". Objects will be saved in a unique sub-directory of the specified location. If None, a default data location will be used.
def __init__(self, role, train_instance_count, train_instance_type, data_location=None, **kwargs): super(AmazonAlgorithmEstimatorBase, self).__init__(role, train_instance_count, train_instance_type, **kwargs) data_location = data_location or "s3://{}/sagemaker-record-sets/".format( self.sagemaker_session.default_bucket()) self.data_location = data_location
100,303
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(AmazonAlgorithmEstimatorBase, cls)._prepare_init_params_from_job_description( job_details, model_channel_name) # The hyperparam names may not be the same as the class attribute that holds them, # for instance: local_lloyd_init_method is called local_init_method. We need to map these # and pass the correct name to the constructor. for attribute, value in cls.__dict__.items(): if isinstance(value, hp): if value.name in init_params['hyperparameters']: init_params[attribute] = init_params['hyperparameters'][value.name] del init_params['hyperparameters'] del init_params['image'] return init_params
100,306
Set hyperparameters needed for training. Args: * records (:class:`~RecordSet`): The records to train this ``Estimator`` on. * mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a default value will be used. * job_name (str): Name of the training job to be created. If not specified, one is generated, using the base name given to the constructor if applicable.
def _prepare_for_training(self, records, mini_batch_size=None, job_name=None): super(AmazonAlgorithmEstimatorBase, self)._prepare_for_training(job_name=job_name) feature_dim = None if isinstance(records, list): for record in records: if record.channel == 'train': feature_dim = record.feature_dim break if feature_dim is None: raise ValueError('Must provide train channel.') else: feature_dim = records.feature_dim self.feature_dim = feature_dim self.mini_batch_size = mini_batch_size
100,307
Return an Instance of :class:`sagemaker.local.data.DataSource` that can handle the provided data_source URI. data_source can be either file:// or s3:// Args: data_source (str): a valid URI that points to a data source. sagemaker_session (:class:`sagemaker.session.Session`): a SageMaker Session to interact with S3 if required. Returns :class:`sagemaker.local.data.DataSource`: an Instance of a Data Source
def get_data_source_instance(data_source, sagemaker_session): parsed_uri = urlparse(data_source) if parsed_uri.scheme == 'file': return LocalFileDataSource(parsed_uri.netloc + parsed_uri.path) elif parsed_uri.scheme == 's3': return S3DataSource(parsed_uri.netloc, parsed_uri.path, sagemaker_session)
100,311
Return an Instance of :class:`sagemaker.local.data.Splitter` according to the specified `split_type`. Args: split_type (str): either 'Line' or 'RecordIO'. Can be left as None to signal no data split will happen. Returns :class:`sagemaker.local.data.Splitter`: an Instance of a Splitter
def get_splitter_instance(split_type): if split_type is None: return NoneSplitter() elif split_type == 'Line': return LineSplitter() elif split_type == 'RecordIO': return RecordIOSplitter() else: raise ValueError('Invalid Split Type: %s' % split_type)
100,312
Return an Instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy` Args: strategy (str): Either 'SingleRecord' or 'MultiRecord' splitter (:class:`sagemaker.local.data.Splitter): splitter to get the data from. Returns :class:`sagemaker.local.data.BatchStrategy`: an Instance of a BatchStrategy
def get_batch_strategy_instance(strategy, splitter):
    if strategy == 'SingleRecord':
        return SingleRecordStrategy(splitter)
    elif strategy == 'MultiRecord':
        return MultiRecordStrategy(splitter)
    else:
        raise ValueError(
            'Invalid Batch Strategy: %s - Valid Strategies: "SingleRecord", "MultiRecord"' % strategy)
100,313
Create an S3DataSource instance Args: bucket (str): S3 bucket name prefix (str): S3 prefix path to the data sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker_session with the desired settings to talk to S3
def __init__(self, bucket, prefix, sagemaker_session): # Create a temporary dir to store the S3 contents root_dir = sagemaker.utils.get_config_value('local.container_root', sagemaker_session.config) if root_dir: root_dir = os.path.abspath(root_dir) working_dir = tempfile.mkdtemp(dir=root_dir) # Docker cannot mount Mac OS /var folder properly see # https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600 # Only apply this workaround if the user didn't provide an alternate storage root dir. if root_dir is None and platform.system() == 'Darwin': working_dir = '/private{}'.format(working_dir) sagemaker.utils.download_folder(bucket, prefix, working_dir, sagemaker_session) self.files = LocalFileDataSource(working_dir)
100,318
Split a file into records using a specific strategy This RecordIOSplitter splits the data into individual RecordIO records. Args: file (str): path to the file to split Returns: generator for the individual records that were split from the file
def split(self, file): with open(file, 'rb') as f: for record in sagemaker.amazon.common.read_recordio(f): yield record
100,319
Group together as many records as possible to fit in the specified size Args: file (str): file path to read the records from. size (int): maximum size in MB that each group of records will be fitted to. passing 0 means unlimited size. Returns: generator of records
def pad(self, file, size=6): buffer = '' for element in self.splitter.split(file): if _payload_size_within_limit(buffer + element, size): buffer += element else: tmp = buffer buffer = element yield tmp if _validate_payload_size(buffer, size): yield buffer
100,320
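The grouping loop above can be illustrated without the SageMaker payload-size helpers; here a plain character-count limit stands in for _payload_size_within_limit() and _validate_payload_size(), and the records are made up.

def batch(records, limit):
    buf = ''
    for element in records:
        if len(buf + element) <= limit:
            buf += element           # keep filling the current batch
        else:
            yield buf                # emit the full batch, start a new one
            buf = element
    if buf and len(buf) <= limit:
        yield buf                    # emit whatever is left over

print(list(batch(['aa', 'bb', 'cc', 'dd'], 5)))  # ['aabb', 'ccdd']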
Yield records one at a time. This SingleRecordStrategy does not group records; it returns them one by one as long as each is within the maximum size. Args: file (str): file path to read the records from. size (int): maximum size in MB that each record can be. Passing 0 means unlimited size. Returns: generator of records
def pad(self, file, size=6): for element in self.splitter.split(file): if _validate_payload_size(element, size): yield element
100,321
Initialize a ``MXNetPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
def __init__(self, endpoint_name, sagemaker_session=None): super(MXNetPredictor, self).__init__(endpoint_name, sagemaker_session, json_serializer, json_deserializer)
100,322
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(Chainer, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) for argument in [Chainer._use_mpi, Chainer._num_processes, Chainer._process_slots_per_host, Chainer._additional_mpi_options]: value = init_params['hyperparameters'].pop(argument, None) if value: init_params[argument[len('sagemaker_'):]] = value image_name = init_params.pop('image') framework, py_version, tag, _ = framework_name_from_image(image_name) if not framework: # If we were unable to parse the framework name from the image it is not one of our # officially supported images, in this case just add the image to the init params. init_params['image_name'] = image_name return init_params init_params['py_version'] = py_version init_params['framework_version'] = framework_version_from_tag(tag) training_job_name = init_params['base_job_name'] if framework != cls.__framework_name__: raise ValueError("Training job: {} didn't use image for requested framework".format(training_job_name)) return init_params
100,328
Create a SageMaker Model Entity Args: *args: Arguments coming from the caller. This class does not require any so they are ignored.
def _create_sagemaker_model(self, *args): # pylint: disable=unused-argument if self.algorithm_arn: # When ModelPackage is created using an algorithm_arn we need to first # create a ModelPackage. If we had already created one then its fine to re-use it. if self._created_model_package_name is None: model_package_name = self._create_sagemaker_model_package() self.sagemaker_session.wait_for_model_package(model_package_name) self._created_model_package_name = model_package_name model_package_name = self._created_model_package_name else: # When a ModelPackageArn is provided we just create the Model model_package_name = self.model_package_arn container_def = { 'ModelPackageName': model_package_name, } if self.env != {}: container_def['Environment'] = self.env model_package_short_name = model_package_name.split('/')[-1] enable_network_isolation = self.enable_network_isolation() self.name = self.name or utils.name_from_base(model_package_short_name) self.sagemaker_session.create_model(self.name, self.role, container_def, vpc_config=self.vpc_config, enable_network_isolation=enable_network_isolation)
100,356
Delete the Amazon SageMaker endpoint backing this predictor. Also delete the endpoint configuration attached to it if delete_endpoint_config is True. Args: delete_endpoint_config (bool, optional): Flag to indicate whether to delete endpoint configuration together with endpoint. Defaults to True. If True, both endpoint and endpoint configuration will be deleted. If False, only endpoint will be deleted.
def delete_endpoint(self, delete_endpoint_config=True): if delete_endpoint_config: self._delete_endpoint_config() self.sagemaker_session.delete_endpoint(self.endpoint)
100,372
Take data of various data formats and serialize them into CSV. Args: data (object): Data to be serialized. Returns: object: Sequence of bytes to be used for the request body.
def __call__(self, data):
    # For inputs which represent multiple "rows", the result should be
    # newline-separated CSV rows.
    if _is_mutable_sequence_like(data) and len(data) > 0 and _is_sequence_like(data[0]):
        return '\n'.join([_CsvSerializer._serialize_row(row) for row in data])
    return _CsvSerializer._serialize_row(data)
100,376
Take data of various formats and serialize them into the expected request body. This uses information about supported input formats for the deployed model. Args: data (object): Data to be serialized. Returns: object: Serialized data used for the request.
def __call__(self, data):
    if isinstance(data, dict):
        # Convert each value in the dict from a numpy array to a list if
        # necessary, so they can be JSON serialized.
        return json.dumps({k: _ndarray_to_list(v) for k, v in six.iteritems(data)})
    # Files and buffers.
    if hasattr(data, 'read'):
        return _json_serialize_from_buffer(data)
    return json.dumps(_ndarray_to_list(data))
100,381
Decode a JSON object into the corresponding Python object. Args: stream (stream): The response stream to be deserialized. content_type (str): The content type of the response. Returns: object: Body of the response deserialized into a JSON object.
def __call__(self, stream, content_type): try: return json.load(codecs.getreader('utf-8')(stream)) finally: stream.close()
100,382
Decode from serialized data into a Numpy array. Args: stream (stream): The response stream to be deserialized. content_type (str): The content type of the response. Can accept CSV, JSON, or NPY data. Returns: object: Body of the response deserialized into a Numpy array.
def __call__(self, stream, content_type=CONTENT_TYPE_NPY): try: if content_type == CONTENT_TYPE_CSV: return np.genfromtxt(codecs.getreader('utf-8')(stream), delimiter=',', dtype=self.dtype) elif content_type == CONTENT_TYPE_JSON: return np.array(json.load(codecs.getreader('utf-8')(stream)), dtype=self.dtype) elif content_type == CONTENT_TYPE_NPY: return np.load(BytesIO(stream.read())) finally: stream.close()
100,384
Serialize data into the request body in NPY format. Args: data (object): Data to be serialized. Can be a numpy array, list, file, or buffer. Returns: object: NPY serialized data used for the request.
def __call__(self, data, dtype=None): if isinstance(data, np.ndarray): if not data.size > 0: raise ValueError("empty array can't be serialized") return _npy_serialize(data) if isinstance(data, list): if not len(data) > 0: raise ValueError("empty array can't be serialized") return _npy_serialize(np.array(data, dtype)) # files and buffers. Assumed to hold npy-formatted data. if hasattr(data, 'read'): return data.read() return _npy_serialize(np.array(data))
100,385
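The NPY round trip itself only needs numpy and an in-memory buffer. A minimal sketch of what a helper like _npy_serialize() presumably does (that helper is not shown in this row, so its exact behaviour is an assumption):

import io
import numpy as np

def npy_serialize(data):
    buffer = io.BytesIO()
    np.save(buffer, data)        # write the array in NPY format
    return buffer.getvalue()

payload = npy_serialize(np.array([[1.0, 2.0], [3.0, 4.0]]))
print(np.load(io.BytesIO(payload)))
# [[1. 2.]
#  [3. 4.]]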
A pandas DataFrame summarizing this object's results. Created by calling SageMaker List and Describe APIs and converting the responses into a convenient tabular summary. Args: force_refresh (bool): Set to True to fetch the latest data from the SageMaker API.
def dataframe(self, force_refresh=False): if force_refresh: self.clear_cache() if self._dataframe is None: self._dataframe = self._fetch_dataframe() return self._dataframe
100,387
Initialize a ``HyperparameterTuningJobAnalytics`` instance. Args: hyperparameter_tuning_job_name (str): name of the HyperparameterTuningJob to analyze. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, one is created using the default AWS configuration chain.
def __init__(self, hyperparameter_tuning_job_name, sagemaker_session=None): sagemaker_session = sagemaker_session or Session() self._sage_client = sagemaker_session.sagemaker_client self._tuning_job_name = hyperparameter_tuning_job_name self.clear_cache()
100,388
Call ``DescribeHyperParameterTuningJob`` for the hyperparameter tuning job. Args: force_refresh (bool): Set to True to fetch the latest data from SageMaker API. Returns: dict: The Amazon SageMaker response for ``DescribeHyperParameterTuningJob``.
def description(self, force_refresh=False): if force_refresh: self.clear_cache() if not self._tuning_job_describe_result: self._tuning_job_describe_result = self._sage_client.describe_hyper_parameter_tuning_job( HyperParameterTuningJobName=self.name ) return self._tuning_job_describe_result
100,392
A (paginated) list of everything from ``ListTrainingJobsForTuningJob``. Args: force_refresh (bool): Set to True to fetch the latest data from SageMaker API. Returns: dict: The Amazon SageMaker response for ``ListTrainingJobsForTuningJob``.
def training_job_summaries(self, force_refresh=False): if force_refresh: self.clear_cache() if self._training_job_summaries is not None: return self._training_job_summaries output = [] next_args = {} for count in range(100): logging.debug("Calling list_training_jobs_for_hyper_parameter_tuning_job %d" % count) raw_result = self._sage_client.list_training_jobs_for_hyper_parameter_tuning_job( HyperParameterTuningJobName=self.name, MaxResults=100, **next_args ) new_output = raw_result['TrainingJobSummaries'] output.extend(new_output) logging.debug("Got %d more TrainingJobs. Total so far: %d" % (len(new_output), len(output))) if ('NextToken' in raw_result) and (len(new_output) > 0): next_args['NextToken'] = raw_result['NextToken'] else: break self._training_job_summaries = output return output
100,393
Append a timestamp to the provided string. This function assures that the total length of the resulting string is not longer than the specified max length, trimming the input parameter if necessary. Args: base (str): String used as prefix to generate the unique name. max_length (int): Maximum length for the resulting string. short (bool): Whether or not to use a truncated timestamp. Returns: str: Input parameter with appended timestamp.
def name_from_base(base, max_length=63, short=False): timestamp = sagemaker_short_timestamp() if short else sagemaker_timestamp() trimmed_base = base[:max_length - len(timestamp) - 1] return '{}-{}'.format(trimmed_base, timestamp)
100,401
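The length budget can be checked with a stand-in timestamp; the real sagemaker_timestamp()/sagemaker_short_timestamp() helpers are not reproduced here, and the values below are made up.

timestamp = '2019-01-01-00-00-00-000'     # hypothetical, 23 characters
base = 'my-very-long-training-job-base-name-that-keeps-going'
max_length = 63

trimmed = base[:max_length - len(timestamp) - 1]   # leave room for '-' + timestamp
name = '{}-{}'.format(trimmed, timestamp)
print(len(name) <= max_length, name)               # True, len(name) == 63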
Extract the base name of the image to use as the 'algorithm name' for the job. Args: image (str): Image name. Returns: str: Algorithm name, as extracted from the image name.
def base_name_from_image(image): m = re.match("^(.+/)?([^:/]+)(:[^:]+)?$", image) algo_name = m.group(2) if m else image return algo_name
100,403
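The same regex applied to a few representative image-name shapes; the repository names are made up.

import re

for image in ('123456789012.dkr.ecr.us-west-2.amazonaws.com/my-algo:latest',
              'my-algo:1.0',
              'my-algo'):
    m = re.match("^(.+/)?([^:/]+)(:[^:]+)?$", image)
    print(image, '->', m.group(2) if m else image)
# all three resolve to 'my-algo' as the algorithm name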
Convert the input to a string, unless it is a unicode string in Python 2. Unicode strings are supported as native strings in Python 3, but ``str()`` cannot be invoked on unicode strings in Python 2, so we need to check for that case when converting user-specified values to strings. Args: value: The value to convert to a string. Returns: str or unicode: The string representation of the value or the unicode string itself.
def to_str(value): if sys.version_info.major < 3 and isinstance(value, six.string_types): return value return str(value)
100,407
Returns a string containing the last modified time and the secondary training job status message. Args: job_description: Returned response from a DescribeTrainingJob call. prev_description: Previous job description from a DescribeTrainingJob call. Returns: str: Job status string to be printed.
def secondary_training_status_message(job_description, prev_description): if job_description is None or job_description.get('SecondaryStatusTransitions') is None\ or len(job_description.get('SecondaryStatusTransitions')) == 0: return '' prev_description_secondary_transitions = prev_description.get('SecondaryStatusTransitions')\ if prev_description is not None else None prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])\ if prev_description_secondary_transitions is not None else 0 current_transitions = job_description['SecondaryStatusTransitions'] if len(current_transitions) == prev_transitions_num: # Secondary status is not changed but the message changed. transitions_to_print = current_transitions[-1:] else: # Secondary status is changed we need to print all the entries. transitions_to_print = current_transitions[prev_transitions_num - len(current_transitions):] status_strs = [] for transition in transitions_to_print: message = transition['StatusMessage'] time_str = datetime.utcfromtimestamp( time.mktime(job_description['LastModifiedTime'].timetuple())).strftime('%Y-%m-%d %H:%M:%S') status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message)) return '\n'.join(status_strs)
100,409
Download a folder from S3 to a local path Args: bucket_name (str): S3 bucket name prefix (str): S3 prefix within the bucket that will be downloaded. Can be a single file. target (str): destination path where the downloaded items will be placed sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.
def download_folder(bucket_name, prefix, target, sagemaker_session): boto_session = sagemaker_session.boto_session s3 = boto_session.resource('s3') bucket = s3.Bucket(bucket_name) prefix = prefix.lstrip('/') # there is a chance that the prefix points to a file and not a 'directory' if that is the case # we should just download it. objects = list(bucket.objects.filter(Prefix=prefix)) if len(objects) > 0 and objects[0].key == prefix and prefix[-1] != '/': s3.Object(bucket_name, prefix).download_file(os.path.join(target, os.path.basename(prefix))) return # the prefix points to an s3 'directory' download the whole thing for obj_sum in bucket.objects.filter(Prefix=prefix): # if obj_sum is a folder object skip it. if obj_sum.key != '' and obj_sum.key[-1] == '/': continue obj = s3.Object(obj_sum.bucket_name, obj_sum.key) s3_relative_path = obj_sum.key[len(prefix):].lstrip('/') file_path = os.path.join(target, s3_relative_path) try: os.makedirs(os.path.dirname(file_path)) except OSError as exc: # EEXIST means the folder already exists, this is safe to skip # anything else will be raised. if exc.errno != errno.EEXIST: raise obj.download_file(file_path)
100,410
Create a tar file containing all the source_files Args: source_files (List[str]): List of file paths that will be contained in the tar file Returns: (str): path to created tar file
def create_tar_file(source_files, target=None): if target: filename = target else: _, filename = tempfile.mkstemp() with tarfile.open(filename, mode='w:gz') as t: for sf in source_files: # Add all files from the directory into the root of the directory structure of the tar t.add(sf, arcname=os.path.basename(sf)) return filename
100,411
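A self-contained round trip showing the consequence of ``arcname=os.path.basename(sf)``: every file lands at the root of the archive, regardless of where it came from (all paths below are temporary scratch files).
import os
import tarfile
import tempfile


def create_tar_file(source_files, target=None):
    filename = target if target else tempfile.mkstemp()[1]
    with tarfile.open(filename, mode='w:gz') as t:
        for sf in source_files:
            t.add(sf, arcname=os.path.basename(sf))
    return filename


workdir = tempfile.mkdtemp()
script = os.path.join(workdir, 'nested', 'train.py')
os.makedirs(os.path.dirname(script))
with open(script, 'w') as f:
    f.write('print("hello")\n')

archive = create_tar_file([script], target=os.path.join(workdir, 'sourcedir.tar.gz'))
with tarfile.open(archive) as t:
    print(t.getnames())  # ['train.py'] -- the 'nested/' directory is dropped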
Download a single file from S3 into a local path. Args: bucket_name (str): S3 bucket name path (str): file path within the bucket target (str): destination directory for the downloaded file. sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3.
def download_file(bucket_name, path, target, sagemaker_session): path = path.lstrip('/') boto_session = sagemaker_session.boto_session s3 = boto_session.resource('s3') bucket = s3.Bucket(bucket_name) bucket.download_file(path, target)
100,412
Initialize ``Tensorboard`` instance. Args: estimator (sagemaker.estimator.Framework): A SageMaker ``Estimator``. logdir (str): Directory for logs (default: None). If not specified, a temporary directory is made.
def __init__(self, estimator, logdir=None): threading.Thread.__init__(self) self.event = threading.Event() self.estimator = estimator self.logdir = logdir or tempfile.mkdtemp()
100,413
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(TensorFlow, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) # Move some of the tensorflow specific init params from hyperparameters into the main init params. for argument in ('checkpoint_path', 'training_steps', 'evaluation_steps', 'model_dir'): value = init_params['hyperparameters'].pop(argument, None) if value is not None: init_params[argument] = value image_name = init_params.pop('image') framework, py_version, tag, script_mode = fw.framework_name_from_image(image_name) if not framework: # If we were unable to parse the framework name from the image it is not one of our # officially supported images, in this case just add the image to the init params. init_params['image_name'] = image_name return init_params if script_mode: init_params['script_mode'] = True init_params['py_version'] = py_version # We switched image tagging scheme from regular image version (e.g. '1.0') to more expressive # containing framework version, device type and python version (e.g. '1.5-gpu-py2'). # For backward compatibility map deprecated image tag '1.0' to a '1.4' framework version # otherwise extract framework version from the tag itself. init_params['framework_version'] = '1.4' if tag == '1.0' else fw.framework_version_from_tag( tag) training_job_name = init_params['base_job_name'] if framework != cls.__framework_name__: raise ValueError("Training job: {} didn't use image for requested framework".format( training_job_name)) return init_params
100,422
Initialize a ``ChainerPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
def __init__(self, endpoint_name, sagemaker_session=None): super(ChainerPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
100,432
Initializes the ``WarmStartConfig`` with the provided ``WarmStartTypes`` and parents. Args: warm_start_type (sagemaker.tuner.WarmStartTypes): This should be one of the supported warm start types in WarmStartType parents (set{str}): Set of parent tuning jobs which will be used to warm start the new tuning job.
def __init__(self, warm_start_type, parents): if warm_start_type not in WarmStartTypes: raise ValueError( "Invalid type: {}, valid warm start types are: [{}]".format(warm_start_type, [t for t in WarmStartTypes])) if not parents: raise ValueError("Invalid parents: {}, parents should not be None/empty".format(parents)) self.type = warm_start_type self.parents = set(parents)
100,443
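A hypothetical construction of the config above; ``IDENTICAL_DATA_AND_ALGORITHM`` is one of the warm start types exposed by ``sagemaker.tuner``, and the parent job names are placeholders.
from sagemaker.tuner import WarmStartConfig, WarmStartTypes

warm_start_config = WarmStartConfig(
    warm_start_type=WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM,
    parents={'parent-tuning-job-1', 'parent-tuning-job-2'},  # placeholder job names
)

# Passing an unknown type or an empty parent set raises ValueError, as shown above.
print(warm_start_config.type, warm_start_config.parents)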
Delete an Amazon SageMaker endpoint. If an endpoint name is not specified, this defaults to looking for an endpoint that shares a name with the best training job for deletion. Args: endpoint_name (str): Name of the endpoint to delete
def delete_endpoint(self, endpoint_name=None): endpoint_name = endpoint_name or self.best_training_job() self.sagemaker_session.delete_endpoint(endpoint_name)
100,452
Create a new Amazon SageMaker hyperparameter tuning job from the HyperparameterTuner. Args: tuner (sagemaker.tuner.HyperparameterTuner): HyperparameterTuner object created by the user. inputs (str): Parameters used when calling :meth:`~sagemaker.estimator.EstimatorBase.fit`. Returns: sagemaker.tuner._TuningJob: Constructed object that captures all information about the started job.
def start_new(cls, tuner, inputs): config = _Job._load_config(inputs, tuner.estimator) warm_start_config_req = None if tuner.warm_start_config: warm_start_config_req = tuner.warm_start_config.to_input_req() tuner_args = config.copy() tuner_args['job_name'] = tuner._current_job_name tuner_args['strategy'] = tuner.strategy tuner_args['objective_type'] = tuner.objective_type tuner_args['objective_metric_name'] = tuner.objective_metric_name tuner_args['max_jobs'] = tuner.max_jobs tuner_args['max_parallel_jobs'] = tuner.max_parallel_jobs tuner_args['parameter_ranges'] = tuner.hyperparameter_ranges() tuner_args['static_hyperparameters'] = tuner.static_hyperparameters tuner_args['input_mode'] = tuner.estimator.input_mode tuner_args['metric_definitions'] = tuner.metric_definitions tuner_args['tags'] = tuner.tags tuner_args['warm_start_config'] = warm_start_config_req tuner_args['early_stopping_type'] = tuner.early_stopping_type if isinstance(tuner.estimator, sagemaker.algorithm.AlgorithmEstimator): tuner_args['algorithm_arn'] = tuner.estimator.algorithm_arn else: tuner_args['image'] = tuner.estimator.train_image() tuner_args['enable_network_isolation'] = tuner.estimator.enable_network_isolation() tuner_args['encrypt_inter_container_traffic'] = \ tuner.estimator.encrypt_inter_container_traffic tuner.estimator.sagemaker_session.tune(**tuner_args) return cls(tuner.sagemaker_session, tuner._current_job_name)
100,463
Initialize the class. Args: force (bool): If True, render colorizes output no matter where the output is (default: False).
def __init__(self, force=False): self.colorize = force or sys.stdout.isatty() or os.environ.get('JPY_PARENT_PID', None)
100,470
Print the output, colorized or not, depending on the environment. Args: index (int): The instance number. s (str): The string to print.
def __call__(self, index, s): if self.colorize: self._color_wrap(index, s) else: print(s)
100,471
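A rough sketch of the colouring behaviour, since ``_color_wrap`` itself is not shown in this excerpt; the ANSI codes and the modulo-indexing are assumptions about how per-instance colouring is typically done.
import sys

_ANSI_COLORS = ['\033[91m', '\033[92m', '\033[93m', '\033[94m']  # red, green, yellow, blue
_RESET = '\033[0m'


class ColorWrapSketch(object):
    def __init__(self, force=False):
        # Colorize when forced or when stdout is an interactive terminal.
        self.colorize = force or sys.stdout.isatty()

    def __call__(self, index, s):
        if self.colorize:
            color = _ANSI_COLORS[index % len(_ANSI_COLORS)]
            print('{}{}{}'.format(color, s, _RESET))
        else:
            print(s)


ColorWrapSketch(force=True)(0, 'algo-1 | starting training')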
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(RLEstimator, cls)\ ._prepare_init_params_from_job_description(job_details, model_channel_name) image_name = init_params.pop('image') framework, _, tag, _ = fw_utils.framework_name_from_image(image_name) if not framework: # If we were unable to parse the framework name from the image it is not one of our # officially supported images, in this case just add the image to the init params. init_params['image_name'] = image_name return init_params toolkit, toolkit_version = cls._toolkit_and_version_from_tag(tag) if not cls._is_combination_supported(toolkit, toolkit_version, framework): training_job_name = init_params['base_job_name'] raise ValueError( "Training job: {} didn't use image for requested framework".format( training_job_name) ) init_params['toolkit'] = RLToolkit(toolkit) init_params['toolkit_version'] = toolkit_version init_params['framework'] = RLFramework(framework) return init_params
100,476
Provides default metric definitions based on provided toolkit. Args: toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training. Returns: list: metric definitions
def default_metric_definitions(cls, toolkit): if toolkit is RLToolkit.COACH: return [ {'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'}, {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?),'} ] elif toolkit is RLToolkit.RAY: float_regex = "[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?" # noqa: W605, E501 pylint: disable=anomalous-backslash-in-string return [ {'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: (%s)' % float_regex}, {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: (%s)' % float_regex} ]
100,484
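Checking the Ray regex above against illustrative log lines (the lines themselves are made up, not taken from a real training run):
import re

float_regex = r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?'

match = re.search('episode_reward_mean: (%s)' % float_regex, 'episode_reward_mean: 94.3')
print(match.group(1))  # 94.3

match = re.search('episode_reward_max: (%s)' % float_regex, 'episode_reward_max: 1.2e2')
print(match.group(1))  # 1.2e2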
Prepare S3 operations (specify where to upload `source_dir`) and environment variables related to framework. Args: estimator (sagemaker.estimator.Estimator): The framework estimator to get information from and update. s3_operations (dict): The dict to specify s3 operations (upload `source_dir`).
def prepare_framework(estimator, s3_operations): if estimator.code_location is not None: bucket, key = fw_utils.parse_s3_url(estimator.code_location) key = os.path.join(key, estimator._current_job_name, 'source', 'sourcedir.tar.gz') else: bucket = estimator.sagemaker_session._default_bucket key = os.path.join(estimator._current_job_name, 'source', 'sourcedir.tar.gz') script = os.path.basename(estimator.entry_point) if estimator.source_dir and estimator.source_dir.lower().startswith('s3://'): code_dir = estimator.source_dir estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) else: code_dir = 's3://{}/{}'.format(bucket, key) estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) s3_operations['S3Upload'] = [{ 'Path': estimator.source_dir or script, 'Bucket': bucket, 'Key': key, 'Tar': True }] estimator._hyperparameters[sagemaker.model.DIR_PARAM_NAME] = code_dir estimator._hyperparameters[sagemaker.model.SCRIPT_PARAM_NAME] = script estimator._hyperparameters[sagemaker.model.CLOUDWATCH_METRICS_PARAM_NAME] = \ estimator.enable_cloudwatch_metrics estimator._hyperparameters[sagemaker.model.CONTAINER_LOG_LEVEL_PARAM_NAME] = estimator.container_log_level estimator._hyperparameters[sagemaker.model.JOB_NAME_PARAM_NAME] = estimator._current_job_name estimator._hyperparameters[sagemaker.model.SAGEMAKER_REGION_PARAM_NAME] = \ estimator.sagemaker_session.boto_region_name
100,485
Update the S3 URI of the framework source directory in the given estimator. Args: estimator (sagemaker.estimator.Framework): The Framework estimator to update. job_name (str): The new job name included in the submit S3 URI. Returns: None. The estimator's ``uploaded_code`` is replaced in place with the updated S3 URI of the framework source directory.
def update_submit_s3_uri(estimator, job_name): if estimator.uploaded_code is None: return pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)' # update the S3 URI with the latest training job. # s3://path/old_job/source/sourcedir.tar.gz will become s3://path/new_job/source/sourcedir.tar.gz submit_uri = estimator.uploaded_code.s3_prefix submit_uri = re.sub(pattern, job_name, submit_uri) script_name = estimator.uploaded_code.script_name estimator.uploaded_code = fw_utils.UploadedCode(submit_uri, script_name)
100,490
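The lookbehind/lookahead pattern above replaces only the job-name path segment; a quick standalone check with made-up URIs:
import re

pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)'
old_uri = 's3://my-bucket/old-job-2019-01-01/source/sourcedir.tar.gz'

new_uri = re.sub(pattern, 'new-job-2019-02-01', old_uri)
print(new_uri)  # s3://my-bucket/new-job-2019-02-01/source/sourcedir.tar.gz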
Prepare the framework model container information. Specify related S3 operations for Airflow to perform. (Upload `source_dir`) Args: model (sagemaker.model.FrameworkModel): The framework model instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'. s3_operations (dict): The dict to specify S3 operations (upload `source_dir`). Returns: dict: The container information of this framework model.
def prepare_framework_container_def(model, instance_type, s3_operations): deploy_image = model.image if not deploy_image: region_name = model.sagemaker_session.boto_session.region_name deploy_image = fw_utils.create_image_uri( region_name, model.__framework_name__, instance_type, model.framework_version, model.py_version) base_name = utils.base_name_from_image(deploy_image) model.name = model.name or utils.name_from_base(base_name) bucket = model.bucket or model.sagemaker_session._default_bucket script = os.path.basename(model.entry_point) key = '{}/source/sourcedir.tar.gz'.format(model.name) if model.source_dir and model.source_dir.lower().startswith('s3://'): code_dir = model.source_dir model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) else: code_dir = 's3://{}/{}'.format(bucket, key) model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) s3_operations['S3Upload'] = [{ 'Path': model.source_dir or script, 'Bucket': bucket, 'Key': key, 'Tar': True }] deploy_env = dict(model.env) deploy_env.update(model._framework_env_vars()) try: if model.model_server_workers: deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(model.model_server_workers) except AttributeError: # This applies to a FrameworkModel which is not SageMaker Deep Learning Framework Model pass return sagemaker.container_def(deploy_image, model.model_data, deploy_env)
100,492
Set any values in the estimator that need to be set before training. Args: * job_name (str): Name of the training job to be created. If not specified, one is generated, using the base name given to the constructor if applicable.
def _prepare_for_training(self, job_name=None): if job_name is not None: self._current_job_name = job_name else: # honor supplied base_job_name or generate it if self.base_job_name: base_name = self.base_job_name elif isinstance(self, sagemaker.algorithm.AlgorithmEstimator): base_name = self.algorithm_arn.split('/')[-1] # pylint: disable=no-member else: base_name = base_name_from_image(self.train_image()) self._current_job_name = name_from_base(base_name) # if output_path was specified we use it otherwise initialize here. # For Local Mode with local_code=True we don't need an explicit output_path if self.output_path is None: local_code = get_config_value('local.local_code', self.sagemaker_session.config) if self.sagemaker_session.local_mode and local_code: self.output_path = '' else: self.output_path = 's3://{}/'.format(self.sagemaker_session.default_bucket())
100,522
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
    init_params = dict()

    init_params['role'] = job_details['RoleArn']
    init_params['train_instance_count'] = job_details['ResourceConfig']['InstanceCount']
    init_params['train_instance_type'] = job_details['ResourceConfig']['InstanceType']
    init_params['train_volume_size'] = job_details['ResourceConfig']['VolumeSizeInGB']
    init_params['train_max_run'] = job_details['StoppingCondition']['MaxRuntimeInSeconds']
    init_params['input_mode'] = job_details['AlgorithmSpecification']['TrainingInputMode']
    init_params['base_job_name'] = job_details['TrainingJobName']
    init_params['output_path'] = job_details['OutputDataConfig']['S3OutputPath']
    init_params['output_kms_key'] = job_details['OutputDataConfig']['KmsKeyId']

    has_hps = 'HyperParameters' in job_details
    init_params['hyperparameters'] = job_details['HyperParameters'] if has_hps else {}

    if 'TrainingImage' in job_details['AlgorithmSpecification']:
        init_params['image'] = job_details['AlgorithmSpecification']['TrainingImage']
    elif 'AlgorithmName' in job_details['AlgorithmSpecification']:
        init_params['algorithm_arn'] = job_details['AlgorithmSpecification']['AlgorithmName']
    else:
        raise RuntimeError('Invalid AlgorithmSpecification. Either TrainingImage or '
                           'AlgorithmName is expected. None was found.')

    if 'MetricDefinitions' in job_details['AlgorithmSpecification']:
        init_params['metric_definitions'] = job_details['AlgorithmSpecification']['MetricDefinitions']

    if 'EnableInterContainerTrafficEncryption' in job_details:
        init_params['encrypt_inter_container_traffic'] = \
            job_details['EnableInterContainerTrafficEncryption']

    subnets, security_group_ids = vpc_utils.from_dict(job_details.get(vpc_utils.VPC_CONFIG_KEY))
    if subnets:
        init_params['subnets'] = subnets
    if security_group_ids:
        init_params['security_group_ids'] = security_group_ids

    if 'InputDataConfig' in job_details and model_channel_name:
        for channel in job_details['InputDataConfig']:
            if channel['ChannelName'] == model_channel_name:
                init_params['model_channel_name'] = model_channel_name
                init_params['model_uri'] = channel['DataSource']['S3DataSource']['S3Uri']
                break

    return init_params
100,529
Create a new Amazon SageMaker training job from the estimator. Args: estimator (sagemaker.estimator.EstimatorBase): Estimator object created by the user. inputs (str): Parameters used when calling :meth:`~sagemaker.estimator.EstimatorBase.fit`. Returns: sagemaker.estimator._TrainingJob: Constructed object that captures all information about the started training job.
def start_new(cls, estimator, inputs):
    local_mode = estimator.sagemaker_session.local_mode
    model_uri = estimator.model_uri

    # Allow file:// input only in local mode
    if cls._is_local_channel(inputs) or cls._is_local_channel(model_uri):
        if not local_mode:
            raise ValueError('File URIs are supported in local mode only. Please use an S3 URI instead.')

    config = _Job._load_config(inputs, estimator)

    # Stringify hyperparameters; hyperparameters() may return None.
    hyperparameters = None
    if estimator.hyperparameters() is not None:
        hyperparameters = {str(k): str(v) for (k, v) in estimator.hyperparameters().items()}

    train_args = config.copy()
    train_args['input_mode'] = estimator.input_mode
    train_args['job_name'] = estimator._current_job_name
    train_args['hyperparameters'] = hyperparameters
    train_args['tags'] = estimator.tags
    train_args['metric_definitions'] = estimator.metric_definitions

    if estimator.enable_network_isolation():
        train_args['enable_network_isolation'] = True

    if estimator.encrypt_inter_container_traffic:
        train_args['encrypt_inter_container_traffic'] = True

    if isinstance(estimator, sagemaker.algorithm.AlgorithmEstimator):
        train_args['algorithm_arn'] = estimator.algorithm_arn
    else:
        train_args['image'] = estimator.train_image()

    estimator.sagemaker_session.train(**train_args)

    return cls(estimator.sagemaker_session, estimator._current_job_name)
100,534
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(Estimator, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) init_params['image_name'] = init_params.pop('image') return init_params
100,539
Set hyperparameters needed for training. This method will also validate ``source_dir``. Args: * job_name (str): Name of the training job to be created. If not specified, one is generated, using the base name given to the constructor if applicable.
def _prepare_for_training(self, job_name=None): super(Framework, self)._prepare_for_training(job_name=job_name) # validate source dir will raise a ValueError if there is something wrong with the # source directory. We are intentionally not handling it because this is a critical error. if self.source_dir and not self.source_dir.lower().startswith('s3://'): validate_source_dir(self.entry_point, self.source_dir) # if we are in local mode with local_code=True. We want the container to just # mount the source dir instead of uploading to S3. local_code = get_config_value('local.local_code', self.sagemaker_session.config) if self.sagemaker_session.local_mode and local_code: # if there is no source dir, use the directory containing the entry point. if self.source_dir is None: self.source_dir = os.path.dirname(self.entry_point) self.entry_point = os.path.basename(self.entry_point) code_dir = 'file://' + self.source_dir script = self.entry_point else: self.uploaded_code = self._stage_user_code_in_s3() code_dir = self.uploaded_code.s3_prefix script = self.uploaded_code.script_name # Modify hyperparameters in-place to point to the right code directory and script URIs self._hyperparameters[DIR_PARAM_NAME] = code_dir self._hyperparameters[SCRIPT_PARAM_NAME] = script self._hyperparameters[CLOUDWATCH_METRICS_PARAM_NAME] = self.enable_cloudwatch_metrics self._hyperparameters[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level self._hyperparameters[JOB_NAME_PARAM_NAME] = self._current_job_name self._hyperparameters[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name
100,541
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(Framework, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) init_params['entry_point'] = json.loads(init_params['hyperparameters'].get(SCRIPT_PARAM_NAME)) init_params['source_dir'] = json.loads(init_params['hyperparameters'].get(DIR_PARAM_NAME)) init_params['enable_cloudwatch_metrics'] = json.loads( init_params['hyperparameters'].get(CLOUDWATCH_METRICS_PARAM_NAME)) init_params['container_log_level'] = json.loads( init_params['hyperparameters'].get(CONTAINER_LOG_LEVEL_PARAM_NAME)) hyperparameters = {} for k, v in init_params['hyperparameters'].items(): # Tuning jobs add this special hyperparameter which is not JSON serialized if k == '_tuning_objective_metric': if v.startswith('"') and v.endswith('"'): v = v.strip('"') hyperparameters[k] = v else: hyperparameters[k] = json.loads(v) init_params['hyperparameters'] = hyperparameters return init_params
100,544
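The hyperparameter values in a DescribeTrainingJob response are JSON-encoded strings, except the tuning objective metric, which is only quote-wrapped; a small standalone illustration of the decoding step above (all values are made up):
import json

raw_hyperparameters = {
    'sagemaker_container_log_level': '20',
    'learning_rate': '0.1',
    '_tuning_objective_metric': '"validation:accuracy"',
}

decoded = {}
for k, v in raw_hyperparameters.items():
    if k == '_tuning_objective_metric':
        # Not JSON-serialized by tuning jobs; just strip the surrounding quotes.
        decoded[k] = v.strip('"') if v.startswith('"') and v.endswith('"') else v
    else:
        decoded[k] = json.loads(v)

print(decoded['sagemaker_container_log_level'], decoded['learning_rate'], decoded['_tuning_objective_metric'])
# 20 0.1 validation:accuracy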
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.GetModelStatus = channel.unary_unary( '/tensorflow.serving.ModelService/GetModelStatus', request_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.SerializeToString, response_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.FromString, )
100,558
Start the Local Transform Job Args: input_data (dict): Describes the dataset to be transformed and the location where it is stored. output_data (dict): Identifies the location where to save the results from the transform job transform_resources (dict): compute instances for the transform job. Currently only supports local or local_gpu **kwargs: additional arguments coming from the boto request object
def start(self, input_data, output_data, transform_resources, **kwargs): self.transform_resources = transform_resources self.input_data = input_data self.output_data = output_data image = self.primary_container['Image'] instance_type = transform_resources['InstanceType'] instance_count = 1 environment = self._get_container_environment(**kwargs) # Start the container, pass the environment and wait for it to start up self.container = _SageMakerContainer(instance_type, instance_count, image, self.local_session) self.container.serve(self.primary_container['ModelDataUrl'], environment) serving_port = get_config_value('local.serving_port', self.local_session.config) or 8080 _wait_for_serving_container(serving_port) # Get capabilities from Container if needed endpoint_url = 'http://localhost:%s/execution-parameters' % serving_port response, code = _perform_request(endpoint_url) if code == 200: execution_parameters = json.loads(response.read()) # MaxConcurrentTransforms is ignored because we currently only support 1 for setting in ('BatchStrategy', 'MaxPayloadInMB'): if setting not in kwargs and setting in execution_parameters: kwargs[setting] = execution_parameters[setting] # Apply Defaults if none was provided kwargs.update(self._get_required_defaults(**kwargs)) self.start_time = datetime.datetime.now() self.batch_strategy = kwargs['BatchStrategy'] if 'Environment' in kwargs: self.environment = kwargs['Environment'] # run the batch inference requests self._perform_batch_inference(input_data, output_data, **kwargs) self.end_time = datetime.datetime.now() self.state = self._COMPLETED
100,565
Get all the Environment variables that will be passed to the container Certain input fields such as BatchStrategy have different values for the API vs the Environment variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion. Args: **kwargs: existing transform arguments Returns: dict: All the environment variables that should be set in the container
def _get_container_environment(self, **kwargs): environment = {} environment.update(self.primary_container['Environment']) environment['SAGEMAKER_BATCH'] = 'True' if 'MaxPayloadInMB' in kwargs: environment['SAGEMAKER_MAX_PAYLOAD_IN_MB'] = str(kwargs['MaxPayloadInMB']) if 'BatchStrategy' in kwargs: if kwargs['BatchStrategy'] == 'SingleRecord': strategy_env_value = 'SINGLE_RECORD' elif kwargs['BatchStrategy'] == 'MultiRecord': strategy_env_value = 'MULTI_RECORD' else: raise ValueError('Invalid BatchStrategy, must be \'SingleRecord\' or \'MultiRecord\'') environment['SAGEMAKER_BATCH_STRATEGY'] = strategy_env_value # we only do 1 max concurrent transform in Local Mode if 'MaxConcurrentTransforms' in kwargs and int(kwargs['MaxConcurrentTransforms']) > 1: logger.warning('Local Mode only supports 1 ConcurrentTransform. Setting MaxConcurrentTransforms to 1') environment['SAGEMAKER_MAX_CONCURRENT_TRANSFORMS'] = '1' # if there were environment variables passed to the Transformer we will pass them to the # container as well. if 'Environment' in kwargs: environment.update(kwargs['Environment']) return environment
100,567
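A tiny standalone sketch of the BatchStrategy-to-environment-variable translation performed above; only the mapping is reproduced here, not the rest of the container environment.
_STRATEGY_TO_ENV = {
    'SingleRecord': 'SINGLE_RECORD',
    'MultiRecord': 'MULTI_RECORD',
}


def batch_strategy_env_value(api_value):
    # Translate the API field value into the value the container expects.
    try:
        return _STRATEGY_TO_ENV[api_value]
    except KeyError:
        raise ValueError("Invalid BatchStrategy, must be 'SingleRecord' or 'MultiRecord'")


print(batch_strategy_env_value('MultiRecord'))  # MULTI_RECORD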