Python | def nodes(self):
    """A generator of all nodes."""
    result = self._connection.execute("select * from nodes")
    while True:
        node = result.fetchone()
        if node is None:
            return
        yield self._node_from_obj(node)
Python | def nodes_in_bounding_box(self, minlon, maxlon, minlat, maxlat):
    """Find all nodes which fall in the bounding box, giving a generator
    of :class:`Node` instances.
    """
    result = self._connection.execute("select * from nodes where longitude >= ? and longitude <= ? and latitude >= ? and latitude <= ?",
        (_to_num(minlon), _to_num(maxlon), _to_num(minlat), _to_num(maxlat)))
    while True:
        node = result.fetchone()
        if node is None:
            return
        yield self._node_from_obj(node)
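A minimal usage sketch of the two node queries above. The import path, the `OSM_SQLite` constructor argument and the database filename are assumptions for illustration; only the generator methods themselves come from the listing.

# Hypothetical usage: the module path and database filename are illustrative.
from osmdigest import sqlite as osmsqlite  # assumed module layout

db = osmsqlite.OSM_SQLite("leeds.db")  # assumed constructor taking a sqlite filename
# Count pubs inside a small bounding box using the generator defined above.
pubs = [node for node in db.nodes_in_bounding_box(-1.56, -1.53, 53.79, 53.81)
        if node.tags.get("amenity") == "pub"]
print(len(pubs))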
Python | def way(self, osm_id):
    """Return details of the way with this id. Raises KeyError on failure
    to find.
    :param osm_id: The OSM id of the way.
    :return: An instance of :class:`Way`.
    """
    result = self._connection.execute("select noderef from ways where osm_id=? order by position", (osm_id,)).fetchall()
    if result is None or len(result) == 0:
        raise KeyError("Way {} not found".format(osm_id))
    way = _digest.Way({"id": osm_id})
    for r in result:
        way.add_node(r["noderef"])
    for key, value in self._get_tags("way_tags", osm_id).items():
        way.add_tag(key, value)
    return way
Python | def complete_way(self, osm_id):
    """Return full details of the way with this id: gives a complete list
    of nodes, not just their ids. Raises KeyError on failure to find.
    :param osm_id: The OSM id of the way. Alternatively, a :class:`Way`
      instance to augment with full node details.
    :return: An instance of :class:`RichWay`.
    """
    if isinstance(osm_id, _digest.Way):
        way = osm_id
    else:
        way = self.way(osm_id)
    def provider():
        for node_id in way.nodes:
            yield self.node(node_id)
    return richobjs.RichWay(way, provider())
Python | def ways(self):
    """A generator of all ways."""
    result = self._connection.execute("select osm_id, noderef from ways order by osm_id, position")
    way = None
    while True:
        ref = result.fetchone()
        if ref is None or (way is not None and way.osm_id != ref["osm_id"]):
            for key, value in self._get_tags("way_tags", way.osm_id).items():
                way.add_tag(key, value)
            yield way
            if ref is None:
                return
        if way is None or way.osm_id != ref["osm_id"]:
            way = _digest.Way({"id": ref["osm_id"]})
        way.add_node(ref["noderef"])
Python | def relation(self, osm_id):
    """Return details of the relation with this id. Raises KeyError on
    failure to find.
    :param osm_id: The OSM id of the relation.
    :return: An instance of :class:`Relation`.
    """
    result = self._connection.execute("select * from relations where osm_id=?", (osm_id,)).fetchall()
    if result is None or len(result) == 0:
        raise KeyError("Relation {} not found".format(osm_id))
    rel = _digest.Relation({"id": osm_id})
    for r in result:
        rel.add_member(_digest.Member(type=r["member"],
            ref=r["memberref"], role=r["role"]))
    for key, value in self._get_tags("relation_tags", osm_id).items():
        rel.add_tag(key, value)
    return rel
Python | def complete_relation(self, osm_id):
    """Return full details of the relation with this id: gives a complete
    list of objects, not just their ids. Raises KeyError on failure to
    find.
    :param osm_id: The OSM id of the relation. Alternatively, a
      :class:`Relation` instance to augment with full details.
    :return: An instance of :class:`RichRelation`.
    """
    if isinstance(osm_id, _digest.Relation):
        osm_id = osm_id.osm_id
    relation = self.relation(osm_id)
    def provide_full_members():
        for member in relation.members:
            if member.type == "node":
                yield self.node(member.ref)
            elif member.type == "way":
                yield self.complete_way(member.ref)
            elif member.type == "relation":
                yield self.complete_relation(member.ref)
    return richobjs.RichRelation(relation, provide_full_members())
Python | def relations(self):
    """A generator of all the relations."""
    result = self._connection.execute("select * from relations order by osm_id")
    rel = None
    while True:
        ref = result.fetchone()
        if ref is None or (rel is not None and rel.osm_id != ref["osm_id"]):
            for key, value in self._get_tags("relation_tags", rel.osm_id).items():
                rel.add_tag(key, value)
            yield rel
            if ref is None:
                return
        if rel is None or rel.osm_id != ref["osm_id"]:
            rel = _digest.Relation({"id": ref["osm_id"]})
        rel.add_member(_digest.Member(type=ref["member"],
            ref=ref["memberref"], role=ref["role"]))
Python | def extract(db, minlon, maxlon, minlat, maxlat, out_filename):
    """Create a new database based on the passed bounding box. We extract all
    ways which feature at least one node in the bounding box. Then all nodes
    in the bounding box, and all nodes required for these ways, are returned.
    Any relation which features a node or way in the dataset is also returned
    (but such a relation is allowed to also have a way/node which is not in the
    dataset).
    As might be expected, this can be rather memory intensive.
    :param db: A :class:`OSM_SQLite` object to extract from.
    :param out_filename: The new database to construct.
    """
    def gen():
        yield _digest.OSM("osm", {"version": db.osm.version, "generator": db.osm.generator + " / extract by OSMDigest"})
        yield _digest.Bounds("bounds", {"minlon": minlon, "maxlon": maxlon, "minlat": minlat, "maxlat": maxlat})
        valid_node_ids = _node_ids_in_bb(db, minlon, maxlon, minlat, maxlat)
        valid_way_ids = _ways_from_nodes(db, valid_node_ids)
        valid_node_ids = valid_node_ids | _all_nodes_from_ways(db, valid_way_ids)
        for nodeid in valid_node_ids:
            yield db.node(nodeid)
        for wayid in valid_way_ids:
            yield db.way(wayid)
        for relation in db.relations():
            if any( ( m.type=="node" and m.ref in valid_node_ids ) or
                    ( m.type=="way" and m.ref in valid_way_ids ) for m in relation.members ):
                yield relation
    for _ in _convert_gen_from_any_source(gen(), out_filename):
        pass
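For illustration, a hedged sketch of calling `extract` with the database object from the earlier sketch; the bounding box values and the output filename are made up.

# Carve a smaller extract out of an existing database (illustrative values).
extract(db, -1.57, -1.52, 53.78, 53.82, "leeds_centre.db")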
Python | def _schema_db(connection):
    """Build all the tables and indexes."""
    connection.execute("create table nodes(osm_id integer primary key, longitude integer, latitude integer)")
    connection.execute("create table node_tags(osm_id integer, key text, value text)")
    connection.execute("create index node_tags_osm_id_idx on node_tags(osm_id)")
    connection.execute("create table ways(osm_id integer, position integer, noderef integer)")
    connection.execute("create index ways_idx on ways(osm_id, position)")
    connection.execute("create table way_tags(osm_id integer, key text, value text)")
    connection.execute("create index way_tags_osm_id_idx on way_tags(osm_id)")
    connection.execute("create table relations(osm_id integer, member text, memberref integer, role text)")
    connection.execute("create index relations_idx on relations(osm_id)")
    connection.execute("create table relation_tags(osm_id integer, key text, value text)")
    connection.execute("create index relation_tags_osm_id_idx on relation_tags(osm_id)")
Python | def convert_gen(xml_file, db_filename):
    """Convert the passed XML file to a sqlite3 database file. As this is
    rather slow, this function is a generator which will `yield` information
    on its progress.
    :param xml_file: Construct from the filename or file-like object; can be
      anything which :module:`digest` can parse.
    :param db_filename: Filename to pass to the `sqlite3` module.
    """
    gen = _digest.parse(xml_file)
    yield from _convert_gen_from_any_source(gen, db_filename)
Python | def convert(xml_file, db_filename):
    """Convert the passed XML file to a sqlite3 database file.
    :param xml_file: Construct from the filename or file-like object; can be
      anything which :module:`digest` can parse.
    :param db_filename: Filename to pass to the `sqlite3` module.
    """
    for x in convert_gen(xml_file, db_filename):
        pass
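A short sketch of the two conversion entry points above; the input and output filenames are placeholders.

# One-shot conversion (illustrative filenames):
convert("leeds.osm.xz", "leeds.db")

# Or watch progress while converting:
for progress in convert_gen("leeds.osm.xz", "leeds.db"):
    print(progress)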
Python | def start(self, osm):
    """Notify of the start of the data.
    :param osm: An instance of :class:`OSM` giving details of the file.
    """
    pass
Python | def bounds(self, bounds):
    """Notify of the bounding box of the data.
    :param bounds: An instance of :class:`Bounds`.
    """
    pass
Python | def way(self, way):
    """Notify of a fully-formed way.
    :param way: An instance of :class:`Way`.
    """
    pass
Python | def relation(self, relation):
    """Notify of a fully-formed relation.
    :param relation: An instance of :class:`Relation`.
    """
    pass
Python | def parse_callback(file, handler):
    """Parse the file-like object to a stream of OSM objects, as defined in
    this module. We report objects via a callback mechanism, and use the SAX
    parser to process the XML file.
    :param file: A filename (intelligently handles ".gz", ".xz", ".bz2" file
      extensions) or a file-like object.
    :param handler: Should follow the interface of :class:`OSMDataHandler`.
    """
    if isinstance(file, str):
        if file[-3:] == ".gz":
            file = _gzip.open(file, mode="rt", encoding="utf-8")
        elif file[-3:] == ".xz":
            file = _lzma.open(file, mode="rt", encoding="utf-8")
        elif file[-4:] == ".bz2":
            file = _bz2.open(file, mode="rt", encoding="utf-8")
        else:
            file = open(file, encoding="utf-8")
        with file:
            _parse_callback(file, handler)
    else:
        _parse_callback(file, handler)
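A sketch of a handler for `parse_callback`, subclassing the no-op `OSMDataHandler` callbacks shown above. It assumes the interface also exposes a `node` callback alongside `way` and `relation`, and the filename is illustrative.

class PubCounter(OSMDataHandler):
    """Count nodes tagged as pubs (assumes a `node` callback exists)."""
    def __init__(self):
        self.count = 0

    def node(self, node):
        if node.tags.get("amenity") == "pub":
            self.count += 1

handler = PubCounter()
parse_callback("leeds.osm.gz", handler)
print(handler.count)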
Python | def _parse_file(fileobj):
    """Actually do the parsing, using saxgen."""
    with _saxgen.parse(fileobj) as gen:
        current_object = None
        for xml_event in gen:
            if isinstance(xml_event, _saxgen.StartDocument):
                pass
            elif isinstance(xml_event, _saxgen.EndDocument):
                pass
            elif isinstance(xml_event, _saxgen.Characters):
                content = xml_event.content.strip()
                if len(content) > 0:
                    raise ValueError("Unexpected string data '{}'".format(content))
            elif isinstance(xml_event, _saxgen.EndElement):
                if xml_event.name in {"node", "way", "relation"}:
                    yield current_object
            elif isinstance(xml_event, _saxgen.StartElement):
                if xml_event.name == "osm":
                    yield OSM(xml_event.name, xml_event.attrs)
                elif xml_event.name == "bounds":
                    yield Bounds(xml_event.name, xml_event.attrs)
                elif xml_event.name == "node":
                    current_object = Node(xml_event.attrs)
                elif xml_event.name == "way":
                    current_object = Way(xml_event.attrs)
                elif xml_event.name == "relation":
                    current_object = Relation(xml_event.attrs)
                elif xml_event.name == "tag":
                    key = xml_event.attrs["k"]
                    value = xml_event.attrs["v"]
                    current_object.tags[key] = value
                elif xml_event.name == "nd":
                    noderef = int(xml_event.attrs["ref"])
                    current_object.nodes.append(noderef)
                elif xml_event.name == "member":
                    member = Member(type=xml_event.attrs["type"],
                        ref=int(xml_event.attrs["ref"]),
                        role=xml_event.attrs["role"])
                    current_object.members.append(member)
                else:
                    raise ValueError("Unexpected XML tag {}".format(xml_event))
            else:
                raise ValueError("Unexpected XML event {}".format(xml_event))
Python | def parse_sax(file):
    """Parse the file-like object to a stream of OSM objects, as defined in
    this module. This is a generator; failure to consume to the end can lead
    to a resource leak. Typical usage:
        for obj in parse("filename.osm"):
            # Handle obj which is of type OSM, Bounds, Node, Way or
            # Relation
            pass
    Uses the SAX parser and the complicated thread-based model; is incredibly
    slow. See :function:`parse` for a real-world alternative.
    :param file: A filename (intelligently handles ".gz", ".xz", ".bz2" file
      extensions) or a file-like object.
    """
    yield from _parse(file, _parse_file)
Python | def from_Nodes(nodes):
    """Construct a new instance from an instance of :class:`Nodes`."""
    new = NodesPacked(None)
    interim_list = [ (osm_id, lon, lat) for osm_id, (lon, lat) in nodes._nodes.items() ]
    new._osm_ids, new._longitude, new._latitude = NodesPacked._arrays_from_unordered_list(interim_list)
    return new
Python | def from_key(self, key):
    """Return a list of all elements which have the tag `key`.
    :param key: The key to search for tags with.
    :return: A list of triples `(typename, value, osm_id)` where `typename`
      is one of "node", "way" or "relation", `value` is the value of the
      tag, and `osm_id` is the id of the element.
    """
    out = []
    for value, osm_id in self.nodes_from_key(key):
        out.append(("node", value, osm_id))
    for value, osm_id in self.ways_from_key(key):
        out.append(("way", value, osm_id))
    for value, osm_id in self.relations_from_key(key):
        out.append(("relation", value, osm_id))
    return out
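A sketch of querying the tag index. Building `Tags` directly from an XML file mirrors how `pythonify_and_pickle` (shown later in this listing) constructs it; the key and filename are placeholders.

# Illustrative: build the tag lookup from an XML file and query one key.
tags = Tags("leeds.osm.xz")
for typename, value, osm_id in tags.from_key("amenity"):
    print(typename, osm_id, value)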
Python | def nodes_from_key(self, key):
    """Returns a list of all the nodes which have the tag key.
    :param key: The key of tags to search for.
    :return: list, maybe empty, of pairs `(value, id)` where `value` is the
      value from the tag, and `id` is osm id of the node.
    """
    return self._by_key(self.from_nodes, key)
Python | def ways_from_key(self, key):
    """Returns a list of all the ways which have the tag key.
    :param key: The key of tags to search for.
    :return: list, maybe empty, of pairs `(value, id)` where `value` is the
      value from the tag, and `id` is osm id of the way.
    """
    return self._by_key(self.from_ways, key)
Python | def relations_from_key(self, key):
    """Returns a list of all the relations which have the tag key.
    :param key: The key of tags to search for.
    :return: list, maybe empty, of pairs `(value, id)` where `value` is the
      value from the tag, and `id` is osm id of the relation.
    """
    return self._by_key(self.from_relations, key)
Python | def node(self, osm_id):
    """Return a (possibly empty) dictionary of tags for the node with
    this id.
    """
    if osm_id in self._nodes:
        return self._nodes[osm_id]
    return dict()
Python | def relation(self, osm_id):
    """Return a (possibly empty) dictionary of tags for the relation with
    this id.
    """
    if osm_id in self._relations:
        return self._relations[osm_id]
    return dict()
Python | def pythonify_and_pickle(file, out_filename):
    """Convert all the data in the XML file and save as pickled files for
    nodes, ways, relations and tags separately.
    :param file: Filename (the file will be opened 4 times, so passing a file
      object will not work). Can be anything which :module:`digest` can parse.
    :param out_filename: If this is `test` then writes files `test_nodes.pic.xz`
      through `test_tags.pic.xz`.
    :return: A tuple of the 4 output filenames for nodes, ways, relations
      and tags.
    """
    obj = NodesPacked(file)
    out = [out_filename + "_nodes.pic.xz"]
    pickle(obj, out[0])
    for typpe, name in [(Ways, "ways"), (Relations, "relations"),
            (Tags, "tags")]:
        obj = None
        obj = typpe(file)
        name = "{}_{}.pic.xz".format(out_filename, name)
        pickle(obj, name)
        out.append(name)
    return out
Python | def notify(self, data):
    """Notify of some data. Your callback handler should, after possible
    processing, push data to this method. Can accept any data, but if
    you notify of an :class:`Exception` then the exception will be raised
    by the iterator; if you notify with the `StopIteration` type then the
    iterator will stop (but the *strongly* preferred way to end iteration
    is to let the callback thread end.)
    Will raise an exception of type :class:`EarlyTerminate` to signal that
    data generation should be stopped.
    :param data: The data object to add to the internal queue.
    """
    if self._terminate:
        self._queue.put(StopIteration)
        raise EarlyTerminate()
    self._queue.put(data)
Python | def send(self, name, data):
    """Standardised way to send data. The iterator will yield an instance
    of :class:`Wrapper` with the `name`/`data` pair.
    :param name: Name of the callback event which generated this data.
    :param data: Tuple of data, or `None`.
    """
    self.notify(Wrapper(name, data))
Python | def geojson_from_node(node):
    """Construct a simple GeoJSON object (as a python dictionary) from a
    node.
    """
    coords = [node.longitude, node.latitude]
    return {"geometry": {"type": "Point", "coordinates": coords}, "properties": _tags_with_id(node)}
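A quick sketch of turning one node into GeoJSON; `db.node` is the lookup used elsewhere in this listing and the id is made up.

import json  # only for pretty-printing

node = db.node(123456)  # illustrative id
print(json.dumps(geojson_from_node(node), indent=2))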
Python | def geojson_from_way(way, polygonise=False):
    """Construct a simple GeoJSON object (as a python dictionary) from a
    way. If the `way` is an instance of :class:`RichWay` then the geometry
    is converted to a line-string. Otherwise there is empty geometry, but an
    extra property of "nodes".
    :param way: The way to convert.
    :param polygonise: Optionally, set to True to return the geometry as a
      polygon.
    """
    json = {"geometry": {}, "properties": _tags_with_id(way)}
    try:
        coords = []
        for node in way.complete_nodes:
            coords.append([node.longitude, node.latitude])
        if polygonise:
            json["geometry"] = {"type": "Polygon", "coordinates": [coords]}
        else:
            json["geometry"] = {"type": "LineString", "coordinates": coords}
    except AttributeError:
        # A plain Way has no `complete_nodes`, so fall back to listing node ids.
        json["properties"]["nodes"] = way.nodes
    return json
Python | def geoseries_from_way(way):
    """Convert a :class:`RichWay` instance to a :class:`GeoSeries`. Each way
    will be returned as a "line string" which is a zero area, not closed
    object. Some Open Street Map ways are better represented as closed regions
    with area (a "polygon") but it is hard to tell this, automatically, from
    context, without knowing a lot about how to interpret tags.
    :param way: An instance of :class:`RichWay`.
    :return: An instance of :class:`GeoSeries` with the geometry and tags of
      the way.
    """
    points = [(node.longitude, node.latitude) for node in way.complete_nodes]
    data = {"geometry": _geometry.LineString(points)}
    for key, value in way.tags.items():
        data[key] = value
    data["osm_id"] = way.osm_id
    return gpd.GeoSeries(data)
Python | def geodataframe_from_relation(relation):
    """Convert a relation into a :class:`GeoDataFrame` by (recursively)
    converting each member into a point (for a node) or a line-string (for a
    way), and collecting all tags (which may lead to many columns in the data
    frame). The first row will be the tags of the relation itself, and
    further rows detail the members. If a member is a relation, then that
    relation will be expanded out in the same way.
    :return: An instance of :class:`GeoDataFrame` with the geometry and tags
      of all members of the relation.
    """
    return gpd.GeoDataFrame.from_features(_features_from_relation(relation))
Python | def geoseries_from_relation(relation):
    """Attempt to convert an instance of :class:`RichRelation` to a
    :class:`GeoSeries`, with some intelligence. For exploring relations of
    unknown type, the :func:`geodataframe_from_relation` might be more useful.
    Currently, we ignore the "type" tag of the relation, and instead look for
    any ways with "role" of "inner" or "outer". These are then ordered to try
    to form a Multi-Polygon, see
    http://wiki.openstreetmap.org/wiki/Relation:multipolygon
    Certain valid OSM constructs, like
    http://wiki.openstreetmap.org/wiki/Relation:multipolygon#Island_within_a_hole
    may not be supported.
    Otherwise, the conversion fails.
    :param relation: An instance of :class:`RichRelation`.
    :return: An instance of :class:`GeoSeries` with the geometry and tags of
      the relation. If conversion fails, then `None`.
    """
    geo = _geo_from_relation(relation)
    if geo is None:
        return None
    data = {"geometry": geo, "osm_id": relation.osm_id}
    for key, value in relation.tags.items():
        data[key] = value
    return gpd.GeoSeries(data)
Python | def polygonise(series):
    """A helper method. Changes the geometry of the passed series object
    to a polygon, using `shapely`.
    :return: The (in place) altered series.
    """
    polys = list(_ops.polygonize(series["geometry"]))
    if len(polys) == 1:
        series["geometry"] = polys[0]
    elif len(polys) > 1:
        series["geometry"] = _geometry.MultiPolygon(polys)
    return series
Python | def shapely_to_geojson(geo):
    """Convert `shapely` geometry objects to GeoJSON. Supported are:
      - Point
      - LineString and LinearRing (the same code)
      - Polygon
      - MultiPolygon
    """
    t = geo.geometryType()
    if t in _shapely_converters:
        return {"geometry": _shapely_converters[t](geo)}
    raise NotImplementedError("Cannot convert " + t)
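A small sketch using `shapely`; the result is whatever dictionary the registered Point converter produces.

from shapely.geometry import Point

feature = shapely_to_geojson(Point(-1.55, 53.8))
# feature is a dict with a "geometry" entry built by the Point converter.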
Python | def fit(self, data, columns=None):
    """
    This function is used to inspect the reference dataframe.
    It will fetch the columns to convert and the categories by column.
    :param data: pandas DataFrame
    :param columns: list-like, default None
      Column names in the DataFrame to be encoded.
      If `columns` is None then all the columns with
      `object` or `category` dtype will be converted.
    """
    self._get_columns_to_encode(data, columns)
    self._get_categories_by_column(data)
    self.logger.debug('The data is fitted with success.')
Python | def write(alignments, handle, format):
    """Write complete set of alignments to a file.
    Arguments:
     - alignments - A list (or iterator) of MultipleSeqAlignment objects,
       or a single alignment object.
     - handle - File handle object to write to, or filename as string
       (note older versions of Biopython only took a handle).
     - format - lower case string describing the file format to write.
    You should close the handle after calling this function.
    Returns the number of alignments written (as an integer).
    """
    from Bio import SeqIO
    # Try and give helpful error messages:
    if not isinstance(format, str):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)
    if isinstance(alignments, MultipleSeqAlignment):
        # This raised an exception in older versions of Biopython
        alignments = [alignments]
    with as_handle(handle, "w") as fp:
        # Map the file format to a writer class
        if format in _FormatToWriter:
            writer_class = _FormatToWriter[format]
            count = writer_class(fp).write_file(alignments)
        elif format in SeqIO._FormatToWriter:
            # Exploit the existing SeqIO parser to do the dirty work!
            # TODO - Can we make one call to SeqIO.write() and count the alignments?
            count = 0
            for alignment in alignments:
                if not isinstance(alignment, MultipleSeqAlignment):
                    raise TypeError(
                        "Expect a list or iterator of MultipleSeqAlignment "
                        "objects, got: %r" % alignment
                    )
                SeqIO.write(alignment, fp, format)
                count += 1
        elif format in _FormatToIterator or format in SeqIO._FormatToIterator:
            raise ValueError(
                "Reading format '%s' is supported, but not writing" % format
            )
        else:
            raise ValueError("Unknown format '%s'" % format)
    if not isinstance(count, int):
        raise RuntimeError(
            "Internal error - the underlying %s "
            "writer should have returned the alignment count, not %r" % (format, count)
        )
    return count
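A self-contained example of `write`, building a tiny alignment in memory and writing it in Clustal format via the public `Bio.AlignIO` API that wraps the function above.

from io import StringIO
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

alignment = MultipleSeqAlignment([
    SeqRecord(Seq("ACTGCTAGCTAG"), id="Alpha"),
    SeqRecord(Seq("ACT-CTAGCTAG"), id="Beta"),
])
handle = StringIO()
count = AlignIO.write([alignment], handle, "clustal")
print(count)  # 1 alignment written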
Python | def _SeqIO_to_alignment_iterator(handle, format, seq_count=None):
    """Use Bio.SeqIO to create a MultipleSeqAlignment iterator (PRIVATE).
    Arguments:
     - handle - handle to the file.
     - format - string describing the file format.
     - seq_count - Optional integer, number of sequences expected in each
       alignment. Recommended for fasta format files.
    If count is omitted (default) then all the sequences in the file are
    combined into a single MultipleSeqAlignment.
    """
    from Bio import SeqIO
    if format not in SeqIO._FormatToIterator:
        raise ValueError("Unknown format '%s'" % format)
    if seq_count:
        # Use the count to split the records into batches.
        seq_record_iterator = SeqIO.parse(handle, format)
        records = []
        for record in seq_record_iterator:
            records.append(record)
            if len(records) == seq_count:
                yield MultipleSeqAlignment(records)
                records = []
        if records:
            raise ValueError("Check seq_count argument, not enough sequences?")
    else:
        # Must assume that there is a single alignment using all
        # the SeqRecord objects:
        records = list(SeqIO.parse(handle, format))
        if records:
            yield MultipleSeqAlignment(records)
Python | def parse(handle, format, seq_count=None):
    """Iterate over an alignment file as MultipleSeqAlignment objects.
    Arguments:
     - handle - handle to the file, or the filename as a string
       (note older versions of Biopython only took a handle).
     - format - string describing the file format.
     - seq_count - Optional integer, number of sequences expected in each
       alignment. Recommended for fasta format files.
    If you have the file name in a string 'filename', use:
    >>> from Bio import AlignIO
    >>> filename = "Emboss/needle.txt"
    >>> format = "emboss"
    >>> for alignment in AlignIO.parse(filename, format):
    ...     print("Alignment of length %i" % alignment.get_alignment_length())
    Alignment of length 124
    Alignment of length 119
    Alignment of length 120
    Alignment of length 118
    Alignment of length 125
    If you have a string 'data' containing the file contents, use::
        from Bio import AlignIO
        from io import StringIO
        my_iterator = AlignIO.parse(StringIO(data), format)
    Use the Bio.AlignIO.read() function when you expect a single record only.
    """
    from Bio import SeqIO
    # Try and give helpful error messages:
    if not isinstance(format, str):
        raise TypeError("Need a string for the file format (lower case)")
    if not format:
        raise ValueError("Format required (lower case string)")
    if format != format.lower():
        raise ValueError("Format string '%s' should be lower case" % format)
    if seq_count is not None and not isinstance(seq_count, int):
        raise TypeError("Need integer for seq_count (sequences per alignment)")
    with as_handle(handle) as fp:
        # Map the file format to a sequence iterator:
        if format in _FormatToIterator:
            iterator_generator = _FormatToIterator[format]
            i = iterator_generator(fp, seq_count)
        elif format in SeqIO._FormatToIterator:
            # Exploit the existing SeqIO parser to do the dirty work!
            i = _SeqIO_to_alignment_iterator(fp, format, seq_count=seq_count)
        else:
            raise ValueError("Unknown format '%s'" % format)
        yield from i
Python | def read(handle, format, seq_count=None):
    """Turn an alignment file into a single MultipleSeqAlignment object.
    Arguments:
     - handle - handle to the file, or the filename as a string
       (note older versions of Biopython only took a handle).
     - format - string describing the file format.
     - seq_count - Optional integer, number of sequences expected in each
       alignment. Recommended for fasta format files.
    If the handle contains no alignments, or more than one alignment,
    an exception is raised. For example, using a PFAM/Stockholm file
    containing one alignment:
    >>> from Bio import AlignIO
    >>> filename = "Clustalw/protein.aln"
    >>> format = "clustal"
    >>> alignment = AlignIO.read(filename, format)
    >>> print("Alignment of length %i" % alignment.get_alignment_length())
    Alignment of length 411
    If however you want the first alignment from a file containing
    multiple alignments this function would raise an exception.
    >>> from Bio import AlignIO
    >>> filename = "Emboss/needle.txt"
    >>> format = "emboss"
    >>> alignment = AlignIO.read(filename, format)
    Traceback (most recent call last):
        ...
    ValueError: More than one record found in handle
    Instead use:
    >>> from Bio import AlignIO
    >>> filename = "Emboss/needle.txt"
    >>> format = "emboss"
    >>> alignment = next(AlignIO.parse(filename, format))
    >>> print("First alignment has length %i" % alignment.get_alignment_length())
    First alignment has length 124
    You must use the Bio.AlignIO.parse() function if you want to read multiple
    records from the handle.
    """
    iterator = parse(handle, format, seq_count)
    try:
        alignment = next(iterator)
    except StopIteration:
        raise ValueError("No records found in handle") from None
    try:
        next(iterator)
        raise ValueError("More than one record found in handle")
    except StopIteration:
        pass
    if seq_count:
        if len(alignment) != seq_count:
            raise RuntimeError(
                "More sequences found in alignment than specified in seq_count: %s."
                % seq_count
            )
    return alignment
Python | def convert(in_file, in_format, out_file, out_format, molecule_type=None):
    """Convert between two alignment files, returns number of alignments.
    Arguments:
     - in_file - an input handle or filename
     - in_format - input file format, lower case string
     - out_file - an output handle or filename
     - out_format - output file format, lower case string
     - molecule_type - optional molecule type to apply, string containing
       "DNA", "RNA" or "protein".
    **NOTE** - If you provide an output filename, it will be opened which will
    overwrite any existing file without warning. This may happen even if the
    conversion is aborted (e.g. an invalid out_format name is given).
    """
    if molecule_type:
        if not isinstance(molecule_type, str):
            raise TypeError("Molecule type should be a string, not %r" % molecule_type)
        elif (
            "DNA" in molecule_type
            or "RNA" in molecule_type
            or "protein" in molecule_type
        ):
            pass
        else:
            raise ValueError("Unexpected molecule type, %r" % molecule_type)
    # TODO - Add optimised versions of important conversions
    # For now just off load the work to SeqIO parse/write
    # Don't open the output file until we've checked the input is OK:
    alignments = parse(in_file, in_format, None)
    if molecule_type:
        # Edit the records on the fly to set molecule type
        def over_ride(alignment):
            """Over-ride molecule in-place."""
            for record in alignment:
                record.annotations["molecule_type"] = molecule_type
            return alignment
        alignments = (over_ride(_) for _ in alignments)
    return write(alignments, out_file, out_format)
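A short example of `convert`; the filenames are placeholders, and the molecule type annotation behaves as described in the docstring above.

from Bio import AlignIO

# Illustrative filenames: convert a Stockholm alignment to FASTA.
n = AlignIO.convert("example.sth", "stockholm", "example.fasta", "fasta",
                    molecule_type="DNA")
print("Converted %i alignment(s)" % n)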
Python | def deploy(k8s_conf, user):
    """
    Installs and configures a Kubernetes cluster
    :param k8s_conf: the k8s configuration dict
    :param user: the sudo user used to apply the playbook
    :raises: Exception should snaps-kubernetes fail to deploy successfully
    """
    logger.info('Setting up k8s cluster')
    __pre_install(k8s_conf, user)
    k8_utils.execute(k8s_conf)
    __post_install(k8s_conf, user)
    logger.info('Completed setting up k8s')
Python | def __pre_install(k8s_conf, user):
"""
Temporary fix to ensure apt works on properly as we have encountered issues
with /etc/resolv.conf DNS setting getting removed after the node has been
rebooted
:param k8s_conf: the snaps-kubernetes dict
:param user: the sudo user used to apply the playbook
:raises: Exception should the ansible playbook fail to execute successfully
"""
node_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
ansible_utils.apply_playbook(
        consts.TEMP_NODE_SETUP_PB, node_ips, user)
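
# ansible_utils.apply_playbook is defined elsewhere in the project. The stand-in
# below is NOT that implementation; it only sketches the signature inferred from
# the call sites in this file (playbook path, optional host list, optional ssh
# user, 'variables', plus 'inventory_file' and 'become_user' used further down),
# by shelling out to the ansible-playbook CLI.
import json
import subprocess


def apply_playbook_sketch(playbook, hosts=None, host_user=None, variables=None,
                          inventory_file=None, become_user=None):
    cmd = ['ansible-playbook', playbook]
    if inventory_file:
        cmd += ['-i', inventory_file]
    elif hosts:
        cmd += ['-i', ','.join(hosts) + ',']  # ad-hoc inline inventory
    if host_user:
        cmd += ['-u', host_user]
    if become_user:
        cmd += ['--become', '--become-user', become_user]
    if variables:
        cmd += ['--extra-vars', json.dumps(variables)]
    subprocess.check_call(cmd)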

def __install_nvidia_docker(k8s_conf, user):
"""
Install nvidia-docker so containers can access NVIDIA GPUs
    :param k8s_conf: the k8s configuration dict
    :param user: the sudo user used to apply the playbook
:raises: Exception should snaps-kubernetes fail to deploy successfully
"""
logger.debug('Installing nvidia-docker')
node_ips = k8s_config_utils.get_minion_node_ips(k8s_conf)
ansible_utils.apply_playbook(
consts.SETUP_NVIDIA_DOCKER_PB, node_ips, user,
        variables={'DAEMON_JSON_FILE': consts.NVIDIA_DOCKER_CONF})

def __install_k8s_hw_specs(k8s_conf, hw_type):
"""
    Install a k8s hardware plugin (e.g. NVIDIA GPU or FPGA) so k8s pods can access it
:param k8s_conf: the snaps-kubernetes configuration dict
:param hw_type: the type of HW to install
:raises: Exception should snaps-kubernetes fail to deploy successfully
"""
logger.debug('Installing k8s [%s] plugin', hw_type)
k8s_version = config_utils.get_k8s_version(k8s_conf, True)
spec_url = None
if hw_type == 'gpu':
spec_url = consts.GPU_K8S_SPEC_URL
elif hw_type == 'fpga':
spec_url = consts.FPGA_K8S_SPEC_URL
if spec_url and k8s_version.startswith('1.18'):
logger.info('Installing k8s hardware plugin')
pb_vars = {
'K8S_VERSION': config_utils.get_k8s_version(k8s_conf, True),
'K8S_PROJ_DIR': k8s_config_utils.get_project_artifact_dir(
k8s_conf),
'K8S_SPEC_URL': spec_url,
'type': hw_type,
'http_proxy': k8s_config_utils.get_proxy_dict(
k8s_conf)['http_proxy'],
'https_proxy': k8s_config_utils.get_proxy_dict(
                k8s_conf)['https_proxy']
}
ansible_utils.apply_playbook(
consts.SETUP_K8S_HW_PLUGIN_PB, variables=pb_vars)
else:
logger.info('No reason to install hardware plugins. K8s version %s',
                    k8s_version)

def __install_kubevirt(k8s_conf, user):
"""
Installs kubevirt in the cluster nodes.
"""
logger.debug('__install_kubevirt')
kubevirt = config_utils.get_kubevirt_cfg(k8s_conf)
if kubevirt == 'true':
master_ip = config_utils.get_master_ip(k8s_conf)
pb_vars = {
'KUBEVIRT_VER': consts.KUBEVIRT_VERSION,
'KUBEVIRT_URL': consts.KUBEVIRT_URL
}
ansible_utils.apply_playbook(consts.SETUP_KUBEVIRT_PB,
master_ip, user, variables=pb_vars)
else:
        logger.info('No reason to Setup Kubevirt')

def provision_preparation(k8s_conf):
"""
This method is responsible for setting up this host for k8s provisioning
:param k8s_conf: the configuration dict object
"""
node_configs = config_utils.get_node_configs(k8s_conf)
if node_configs and len(node_configs) > 0:
for node_config in node_configs:
host = node_config[consts.HOST_KEY]
pb_vars = {'hostname': host[consts.HOSTNAME_KEY],
'ip': host[consts.IP_KEY]}
ansible_utils.apply_playbook(consts.SETUP_ETC_HOSTS,
variables=pb_vars)
else:
        raise Exception('No hosts to deploy - Aborting')

def clean_up_k8(k8s_conf, multus_enabled_str):
"""
This function is used for clean/Reset the kubernetes cluster
"""
multus_enabled = str(multus_enabled_str)
project_name = config_utils.get_project_name(k8s_conf)
kubespray_pb = "{}/{}".format(config_utils.get_kubespray_dir(k8s_conf),
consts.KUBESPRAY_CLUSTER_RESET_PB)
inv_filename = config_utils.get_kubespray_inv_file(k8s_conf)
logger.info('Calling Kubespray reset.yaml with inventory %s', inv_filename)
try:
pb_vars = {'reset_confirmation': 'yes'}
pb_vars.update(ANSIBLE_VERSION_DICT)
ansible_utils.apply_playbook(
kubespray_pb, host_user=config_utils.get_node_user(k8s_conf),
variables=pb_vars,
inventory_file=inv_filename, become_user='root')
except Exception as e:
logger.warn('Error running playbook %s with error %s', kubespray_pb, e)
logger.info("Docker cleanup starts")
ips = config_utils.get_host_ips(k8s_conf)
try:
ansible_utils.apply_playbook(
consts.K8_DOCKER_CLEAN_UP_ON_NODES, ips,
config_utils.get_node_user(k8s_conf))
except Exception as e:
logger.warn('Error running playbook %s with error %s',
consts.K8_DOCKER_CLEAN_UP_ON_NODES, e)
host_ips = config_utils.get_hostname_ips_dict(k8s_conf)
for host_name, ip in host_ips.items():
pb_vars = {
'ip': ip,
'host_name': host_name,
'Project_name': project_name,
'multus_enabled': multus_enabled,
}
try:
ansible_utils.apply_playbook(
consts.K8_REMOVE_NODE_K8, [ip],
config_utils.get_node_user(k8s_conf),
variables=pb_vars)
except Exception as e:
logger.warn('Error running playbook %s with error %s',
consts.K8_REMOVE_NODE_K8, e)
logger.info('EXECUTING REMOVE PROJECT FOLDER PLAY')
pb_vars = {
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
'Project_name': project_name,
}
try:
ansible_utils.apply_playbook(consts.K8_REMOVE_FOLDER,
variables=pb_vars)
except Exception as e:
logger.warn('Error running playbook %s with error %s',
                    consts.K8_REMOVE_FOLDER, e)

def start_k8s_install(k8s_conf):
"""
    This function is used to deploy the Kubernetes cluster
"""
logger.info('Starting Kubernetes installation')
__set_hostnames(k8s_conf)
__kubespray(k8s_conf)
__complete_k8s_install(k8s_conf)
    logger.info('Completed Kubernetes installation')

def launch_crd_network(k8s_conf):
"""
This function is used to create crd network
"""
master_host_name, master_ip = config_utils.get_first_master_host(k8s_conf)
logger.info('EXECUTING CRD NETWORK CREATION PLAY. Master ip - %s, '
'Master Host Name - %s', master_ip, master_host_name)
pb_vars = {
'CRD_NET_YML': consts.K8S_CRD_NET_CONF,
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
}
ansible_utils.apply_playbook(consts.K8_CREATE_CRD_NETWORK,
                                 variables=pb_vars)

def launch_multus_cni(k8s_conf):
"""
This function is used to launch multus cni
"""
logger.info('EXECUTING MULTUS CNI PLAY')
networking_plugin = config_utils.get_networking_plugin(k8s_conf)
master_ips = config_utils.get_master_node_ips(k8s_conf)
minion_ips = config_utils.get_minion_node_ips(k8s_conf)
ips = master_ips
for minion_ip in minion_ips:
ips.append(minion_ip)
ansible_utils.apply_playbook(
consts.K8_MULTUS_NODE_BIN, ips,
config_utils.get_node_user(k8s_conf),
variables=config_utils.get_kubespray_proxy_dict(k8s_conf))
ips = config_utils.get_minion_node_ips(k8s_conf)
ansible_utils.apply_playbook(
consts.K8_MULTUS_SET_NODE, ips, config_utils.get_node_user(k8s_conf),
variables={
'networking_plugin': networking_plugin,
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf),
'KUBERNETES_PATH': consts.NODE_K8S_PATH,
        })

def create_cluster_role(k8s_conf):
"""
    This function is used to create the cluster role required by Multus CNI
"""
logger.info('EXECUTING CREATE CLUSTER ROLE PLAY')
pb_vars = {
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf)}
ansible_utils.apply_playbook(
consts.K8_MULTUS_SET_MASTER,
variables=pb_vars)
logger.info('Setting nodes in cluster role definition')
node_configs = config_utils.get_node_configs(k8s_conf)
if node_configs and len(node_configs) > 0:
for node_config in node_configs:
host = node_config[consts.HOST_KEY]
pb_vars = {
'hostname': host[consts.HOSTNAME_KEY],
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf)}
ansible_utils.apply_playbook(
consts.K8_MULTUS_CLUSTER_ROLE_DEFINE, variables=pb_vars)
logger.info('EXECUTING cluster role creation')
pb_vars = {
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf)}
ansible_utils.apply_playbook(
consts.K8_MULTUS_CLUSTER_ROLE_CREATION,
        variables=pb_vars)

def launch_sriov_cni_configuration(k8s_conf):
"""
This function is used to launch sriov cni
"""
logger.info('EXECUTING SRIOV CNI PLAY')
networking_plugin = config_utils.get_networking_plugin(k8s_conf)
dpdk_driver = 'vfio-pci'
dpdk_enable = False
sriov_cfgs = config_utils.get_multus_cni_sriov_cfgs(k8s_conf)
for sriov_cfg in sriov_cfgs:
sriov_host = sriov_cfg[consts.HOST_KEY]
# for sriov_net in sriov_hosts:
hostname = sriov_host[consts.HOSTNAME_KEY]
for sriov_net in sriov_host[consts.SRIOV_NETWORKS_KEY]:
dpdk_enable = config_utils.bool_val(
sriov_net.get(consts.SRIOV_DPDK_ENABLE_KEY, None))
pb_vars = {
'host_name': hostname,
'sriov_intf': sriov_net[consts.SRIOV_INTF_KEY],
'networking_plugin': networking_plugin,
'KUBERNETES_PATH': consts.NODE_K8S_PATH,
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf),
}
ansible_utils.apply_playbook(
consts.K8_SRIOV_ENABLE, [hostname],
config_utils.get_node_user(k8s_conf),
variables=pb_vars)
pb_vars = config_utils.get_proxy_dict(k8s_conf)
pb_vars.update(
{'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
ansible_utils.apply_playbook(consts.K8_SRIOV_CNI_BUILD, variables=pb_vars)
logger.info('DPDK flag is %s', dpdk_enable)
if dpdk_enable is True:
pb_vars = config_utils.get_proxy_dict(k8s_conf)
pb_vars.update(
{'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
ansible_utils.apply_playbook(consts.K8_SRIOV_DPDK_CNI,
variables=pb_vars)
master_nodes_tuple_3 = config_utils.get_master_nodes_ip_name_type(k8s_conf)
for hostname, ip, host_type in master_nodes_tuple_3:
logger.info('INSTALLING SRIOV BIN ON MASTER. Master Host Name - %s, '
'Master Host Type - %s', hostname, host_type)
ansible_utils.apply_playbook(
consts.K8_SRIOV_CNI_BIN_INST, [ip],
config_utils.get_node_user(k8s_conf),
variables={
'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
if dpdk_enable is True:
logger.info('INSTALLING SRIOV DPDK BIN ON MASTER')
ansible_utils.apply_playbook(
consts.K8_SRIOV_DPDK_CNI_BIN_INST, [ip],
config_utils.get_node_user(k8s_conf),
variables={
'SRC_PACKAGE_PATH':
config_utils.get_artifact_dir(k8s_conf)})
minon_ips = config_utils.get_minion_node_ips(k8s_conf)
ansible_utils.apply_playbook(
        consts.K8_SRIOV_DPDK_CNI_BIN_INST, minon_ips,
config_utils.get_node_user(k8s_conf),
variables={
'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})
if dpdk_enable is True:
logger.info('INSTALLING SRIOV DPDK BIN ON WORKERS')
ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_DRIVER_LOAD, minon_ips,
config_utils.get_node_user(k8s_conf),
variables={'dpdk_driver': dpdk_driver})
ansible_utils.apply_playbook(
            consts.K8_SRIOV_DPDK_CNI_BIN_INST, minon_ips,
config_utils.get_node_user(k8s_conf),
variables={
                'SRC_PACKAGE_PATH': config_utils.get_artifact_dir(k8s_conf)})

def create_weave_interface(k8s_conf, weave_detail):
"""
This function is used to create weave interace and network
"""
logger.info('CREATING WEAVE NETWORK')
network_dict = weave_detail.get(consts.WEAVE_NET_DTLS_KEY)
network_name = network_dict.get(consts.NETWORK_NAME_KEY)
logger.info('Creating weave network with name - %s', network_name)
pb_vars = {
'networkName': network_name,
'masterPlugin': network_dict.get(consts.MASTER_PLUGIN_KEY),
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
'KUBESPRAY_PATH': config_utils.get_kubespray_dir(k8s_conf),
# variables for weave-net.yml.j2 found in kubespray roles
'kube_pods_subnet': network_dict.get(consts.SUBNET_KEY),
'enable_network_policy': 0,
'kube_version': config_utils.get_version(k8s_conf),
'weave_kube_image_repo': 'docker.io/weaveworks/weave-kube',
'weave_kube_image_tag': '2.5.0',
'weave_npc_image_tag': '2.5.0',
'k8s_image_pull_policy': 'IfNotPresent',
'weave_npc_image_repo': 'docker.io/weaveworks/weave-npc',
'weave_password': 'password'
}
ansible_utils.apply_playbook(
        consts.K8_CONF_WEAVE_NETWORK_CREATION, variables=pb_vars)

def launch_ceph_kubernetes(k8s_conf):
"""
This function is used for deploy the ceph
    TODO/FIXME - this Ceph setup should be removed and Rook/Ceph used instead
"""
# Setup Ceph OSD hosts
ceph_osds = config_utils.get_ceph_osds(k8s_conf)
for ceph_osd in ceph_osds:
ip = ceph_osd[consts.IP_KEY]
pb_vars = {
'osd_host_name': ceph_osd[consts.HOSTNAME_KEY],
'user_id': ceph_osd[consts.USER_KEY],
'passwd': ceph_osd[consts.PASSWORD_KEY],
'osd_ip': ip,
}
ansible_utils.apply_playbook(
consts.INSTALL_CEPH, [ip], config_utils.get_node_user(k8s_conf),
variables=pb_vars)
proxy_dict = config_utils.get_proxy_dict(k8s_conf)
ceph_hosts_info = config_utils.get_ceph_hosts_info(k8s_conf)
ceph_master_host = ceph_hosts_info[0][0]
ceph_master_ip = ceph_hosts_info[0][1]
ceph_osds_info = config_utils.get_ceph_osds_info(k8s_conf)
for host_name, ip, host_type in ceph_osds_info:
pb_vars = {
'host_name': host_name,
'master_host_ip': ceph_master_ip,
}
pb_vars.update(proxy_dict)
logger.info('Executing CEPH deploy play. IP - %s, '
'Host Type - %s', ip, host_type)
ansible_utils.apply_playbook(
consts.CEPH_DEPLOY, [host_name],
config_utils.get_node_user(k8s_conf),
variables=pb_vars)
ansible_utils.apply_playbook(
consts.CEPH_MON, [ceph_master_ip],
config_utils.get_node_user(k8s_conf),
variables=proxy_dict)
for ceph_host in ceph_osds:
second_storage = ceph_host.get(consts.STORAGE_TYPE_KEY)
if second_storage and isinstance(second_storage, list):
for storage in second_storage:
pb_vars = {
'host_name': ceph_host[consts.HOSTNAME_KEY],
'master_host_name': ceph_master_host,
'storage': storage,
}
pb_vars.update(proxy_dict)
ansible_utils.apply_playbook(
consts.CEPH_STORAGE_NODE, [ceph_host[consts.IP_KEY]],
config_utils.get_node_user(k8s_conf), variables=pb_vars)
ansible_utils.apply_playbook(
consts.CEPH_STORAGE_HOST, [ceph_master_host],
config_utils.get_node_user(k8s_conf), variables=pb_vars)
for host_name, ip, host_type in ceph_hosts_info:
pb_vars = {
'host_name': host_name,
'master_host_name': ceph_master_host,
}
pb_vars.update(proxy_dict)
ansible_utils.apply_playbook(
consts.CEPH_DEPLOY_ADMIN, [ip],
config_utils.get_node_user(k8s_conf),
variables=pb_vars)
pb_vars = {
'master_host_name': ceph_master_host,
}
pb_vars.update(proxy_dict)
ansible_utils.apply_playbook(
consts.CEPH_MDS, [ip], config_utils.get_node_user(k8s_conf),
variables=pb_vars)
proxy_dict = config_utils.get_kubespray_proxy_dict(k8s_conf)
pb_vars = {
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(k8s_conf),
'CEPH_FAST_RDB_YML': consts.K8S_CEPH_RDB_J2,
'ceph_controller_ip': ceph_master_ip,
}
pb_vars.update(proxy_dict)
ansible_utils.apply_playbook(
consts.KUBERNETES_CEPH_CLASS, [ceph_master_ip],
config_utils.get_node_user(k8s_conf),
variables=pb_vars)
ceph_claims = config_utils.get_ceph_claims(k8s_conf)
for claim in ceph_claims:
pb_vars = {
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf),
'ceph_storage_size': claim[consts.CEPH_STORAGE_KEY],
'ceph_claim_name': claim[consts.CEPH_CLAIM_NAME_KEY],
'CEPH_VC_YML': consts.K8S_CEPH_VC_J2,
}
pb_vars.update(proxy_dict)
ansible_utils.apply_playbook(
            consts.KUBERNETES_CEPH_CLAIM, variables=pb_vars)

def launch_persitent_volume_kubernetes(k8s_conf):
"""
    This function is used to deploy the persistent volume
"""
vol_claims = config_utils.get_persist_vol_claims(k8s_conf)
for vol_claim in vol_claims:
pb_vars = {
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf),
'TASK_PV_VOL_CONF': consts.K8S_VOL_PV_VOL_J2,
'TASK_PV_CLAIM_CONF': consts.K8S_VOL_PV_CLAIM_J2,
'storage_size': vol_claim[consts.STORAGE_KEY],
'claim_name': vol_claim[consts.CLAIM_NAME_KEY],
}
ansible_utils.apply_playbook(
            consts.KUBERNETES_PERSISTENT_VOL, variables=pb_vars)

def __install_kubectl(k8s_conf):
"""
    This function is used to install kubectl on the bootstrap node
"""
host_name, ip = config_utils.get_first_master_host(k8s_conf)
api_ip_url = config_utils.get_k8s_api_url(k8s_conf, ip)
pb_vars = {
'ip': ip,
'api_ip_url': api_ip_url,
'node_user': config_utils.get_node_user(k8s_conf),
'host_name': host_name,
'Project_name': config_utils.get_project_name(k8s_conf),
'CONFIG_DEMO_FILE': consts.KUBECTL_CONF_TMPLT,
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf),
'KUBERNETES_PATH': consts.NODE_K8S_PATH,
}
pb_vars.update(config_utils.get_proxy_dict(k8s_conf))
ansible_utils.apply_playbook(consts.K8_KUBECTL_INSTALLATION,
                                 variables=pb_vars)

def delete_default_weave_interface(k8s_conf):
"""
This function is used to delete default weave interface
"""
if config_utils.get_networking_plugin(k8s_conf) == consts.WEAVE_TYPE:
network_name = config_utils.get_default_network(
k8s_conf)[consts.NETWORK_NAME_KEY]
ansible_utils.apply_playbook(
consts.K8_DELETE_WEAVE_INTERFACE, variables={
'networkName': network_name,
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf)})

def delete_flannel_interfaces(k8s_conf):
"""
This function is used to delete flannel interfaces
"""
logger.info('EXECUTING FLANNEL INTERFACE DELETION PLAY')
multus_flannel_cfgs = config_utils.get_multus_cni_flannel_cfgs(k8s_conf)
for multus_flannel_cfg in multus_flannel_cfgs:
hostdetails = multus_flannel_cfg.get(consts.FLANNEL_NET_DTLS_KEY)
network_name = hostdetails.get(consts.NETWORK_NAME_KEY)
pb_vars = {
'node_type': consts.NODE_TYPE_MASTER,
'networkName': network_name,
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
k8s_conf),
}
master_host_name, master_ip = config_utils.get_first_master_host(
k8s_conf)
logger.info('Executing delete flannel interface play. '
'Master Host Name - %s', master_host_name)
if master_ip:
ansible_utils.apply_playbook(
consts.K8_DELETE_FLANNEL_INTERFACE, [master_ip],
                config_utils.get_node_user(k8s_conf), variables=pb_vars)

def delete_weave_interface(k8s_conf):
"""
This function is used to delete weave interface
"""
logger.info('EXECUTING WEAVE INTERFACE DELETION PLAY')
weave_details = config_utils.get_multus_cni_weave_cfgs(k8s_conf)
for weave_detail in weave_details:
network_name = weave_detail.get(consts.NETWORK_NAME_KEY)
ansible_utils.apply_playbook(
consts.K8_DELETE_WEAVE_INTERFACE, variables={
'networkName': network_name,
'PROJ_ARTIFACT_DIR': config_utils.get_project_artifact_dir(
                    k8s_conf)})

def post_processing(config):
"""
Detects hardware installed on machines and installs the necessary drivers
:param config: the configuration
:raises: Exception should snaps-boot fail to execute successfully
"""
    raise NotImplementedError

def validate_kubernetes_tag(config):
"""
Checks the presence of Kubernetes tag
"""
logger.info("checking kubernetes tag")
    validate_dict_data(config, consts.K8S_KEY)
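
# validate_dict_data is a helper defined elsewhere in the project; the sketch
# below only illustrates its assumed behaviour (raise when the key is missing
# or the dict is empty) and is not the actual implementation.
def validate_dict_data_sketch(config_dict, key):
    if not config_dict or key not in config_dict:
        raise ValidationException('{} is not present in the config'.format(key))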

def validate_kubernetes_params(config):
"""
Checks the presence of Kubernetes parameters
"""
logger.info("checking kubernetes params")
k8_config = config.get(consts.K8S_KEY)
validate_dict_data(k8_config, consts.PROJECT_NAME_KEY)
validate_dict_data(k8_config, consts.METRICS_SERVER_KEY)
validate_dict_data(k8_config, consts.NODE_CONF_KEY)
validate_dict_data(k8_config, consts.DOCKER_REPO_KEY)
validate_dict_data(k8_config, consts.NETWORKS_KEY)
validate_dict_data(k8_config, consts.PERSIST_VOL_KEY)
    validate_dict_data(k8_config, consts.CPU_ALLOC_KEY)

def validate_hostnames(config):
"""
Ensures that each configured hostname is unique
:param config: the k8s config
:raises ValidationException
"""
logger.info('Checking to ensure all hostnames are unique')
nodes_info = config_utils.get_nodes_ip_name_type(config)
hostnames = set()
for hostname, ip, node_type in nodes_info:
hostnames.add(hostname)
if len(nodes_info) != len(hostnames):
raise ValidationException('Hostnames must be unique - {}'.format(
            nodes_info))
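
# Standalone illustration of the set-based uniqueness check used above; the node
# tuples are hypothetical, not taken from a real configuration.
nodes_info = [('master1', '10.0.0.1', 'master'), ('minion1', '10.0.0.2', 'minion')]
hostnames = {hostname for hostname, _ip, _type in nodes_info}
assert len(hostnames) == len(nodes_info)  # fails if two hosts share a hostname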

def __validate_load_balancer_ip(api_ext_loadbalancer_dict, hostname_map):
"""
    function to validate that the loadbalancer ip is not the same as any
    master/minion ip
:param api_ext_loadbalancer_dict:
:param hostname_map:
:return:
"""
logger.info("Argument List:\n api_ext_loadbalancer_dict: %s\n "
"hostname_map: %s", api_ext_loadbalancer_dict,
hostname_map)
for host in hostname_map:
if hostname_map[host] == api_ext_loadbalancer_dict.get(
consts.HA_API_EXT_LB_KEY).get(consts.IP_KEY):
logger.info('Alert !! load balancer ip must not be '
'same as master/minion ip')
return False
    return True

def __validate_load_balancer_port(api_ext_loadbalancer_dict):
"""
    function to validate that the loadbalancer port is not the same as the
    master api server default port 6443
:param api_ext_loadbalancer_dict:
:return:
"""
logger.info("Argument List:\n api_ext_loadbalancer_dict: %s",
api_ext_loadbalancer_dict)
lb_port = api_ext_loadbalancer_dict.get(
consts.HA_API_EXT_LB_KEY).get("port")
if lb_port == 6443:
logger.info('Alert !! load balancer port must not be same as '
'master api server default port 6443 ')
return False
elif lb_port == "":
logger.info('Alert !! load balancer port must not be null/empty ')
return False
    return True

def validate_countmasters(config):
"""
    Raises a ValidationException when the master node count is even or < 1
:param config: the k8s config dict
:raises ValidationException
"""
logger.info("checking Count the no of masters")
node_info = config_utils.get_nodes_ip_name_type(config)
master_count = 0
for hostname, ip, node_type in node_info:
if node_type == consts.NODE_TYPE_MASTER:
master_count += 1
if master_count % 2 == 1 and master_count > 0:
return
raise ValidationException("Number of masters must be odd") | def validate_countmasters(config):
"""
Raises an ValidationException when the master node count is even or < 1
:param config: the k8s config dict
:raises ValidationException
"""
logger.info("checking Count the no of masters")
node_info = config_utils.get_nodes_ip_name_type(config)
master_count = 0
for hostname, ip, node_type in node_info:
if node_type == consts.NODE_TYPE_MASTER:
master_count += 1
if master_count % 2 == 1 and master_count > 0:
return
raise ValidationException("Number of masters must be odd") |
Python | def validate_access_and_security_params(config):
"""
Checks the presence of access_and_security parameters
"""
logger.info("checking basic_authentication params")
sec_params = config_utils.get_k8s_dict(config).get(consts.ACCESS_SEC_KEY)
if consts.AUTH_KEY in sec_params:
auth_key = sec_params[consts.AUTH_KEY]
if (consts.BASIC_AUTH_KEY not in auth_key
or consts.TOKEN_AUTH_KEY not in auth_key):
raise ValidationException(
"Atleast one out of basic_authentication or "
"token_authentication must be present")
else:
return
else:
raise ValidationException("authentication is not present") | def validate_access_and_security_params(config):
"""
Checks the presence of access_and_security parameters
"""
logger.info("checking basic_authentication params")
sec_params = config_utils.get_k8s_dict(config).get(consts.ACCESS_SEC_KEY)
if consts.AUTH_KEY in sec_params:
auth_key = sec_params[consts.AUTH_KEY]
if (consts.BASIC_AUTH_KEY not in auth_key
or consts.TOKEN_AUTH_KEY not in auth_key):
raise ValidationException(
"Atleast one out of basic_authentication or "
"token_authentication must be present")
else:
return
else:
raise ValidationException("authentication is not present") |
Python | def validate_node_config_tag(config):
"""
Checks the presence of node configuration tag
"""
logger.info("checking node config tag")
k8s_dict = config_utils.get_k8s_dict(config)
    validate_dict_data(k8s_dict, consts.NODE_CONF_KEY)

def validate_node_config_params(config):
"""
Checks the presence of node configuration parameters
"""
logger.info("checking node configuration params")
node_configs = config_utils.get_node_configs(config)
validate_dict_data(node_configs[0], consts.HOST_KEY)
for node_conf in node_configs:
host_conf = node_conf[consts.HOST_KEY]
validate_dict_data(host_conf, consts.HOSTNAME_KEY)
validate_dict_data(host_conf, consts.IP_KEY)
validate_dict_data(host_conf, consts.NODE_TYPE_KEY)
validate_dict_data(host_conf, consts.LABEL_KEY)
validate_dict_data(host_conf, consts.LBL_VAL_KEY)
if consts.REG_PORT_KEY in host_conf:
node_type = host_conf[consts.NODE_TYPE_KEY]
            if (node_type != consts.NODE_TYPE_MASTER
                    and node_type != consts.NODE_TYPE_MINION):
raise ValidationException(
'Node type should be either master or minion')
validate_dict_data(host_conf, consts.PASSWORD_KEY)
        validate_dict_data(host_conf, consts.USER_KEY)

def validate_docker_repo_tag(config):
"""
Checks the presence of docker repo tag
"""
logger.info("checking docker repo tag")
k8s_dict = config.get(consts.K8S_KEY)
    validate_dict_data(k8s_dict, consts.DOCKER_REPO_KEY)

def validate_docker_repo_params(config):
"""
Checks the presence of docker repo parameters
"""
logger.info("checking docker repo params")
docker_repo_params = config_utils.get_docker_repo(config)
validate_dict_data(docker_repo_params, consts.IP_KEY)
validate_dict_data(docker_repo_params, consts.PASSWORD_KEY)
validate_dict_data(docker_repo_params, consts.USER_KEY)
    validate_dict_data(docker_repo_params, consts.PORT_KEY)

def validate_proxy_params(config):
"""
Checks the presence of proxy parameters
"""
logger.info("checking proxy params")
proxy_params = config_utils.get_proxy_dict(config)
validate_dict_data(proxy_params, consts.FTP_PROXY_KEY)
validate_dict_data(proxy_params, consts.HTTP_PROXY_KEY)
validate_dict_data(proxy_params, consts.HTTPS_PROXY_KEY)
    validate_dict_data(proxy_params, consts.NO_PROXY_KEY)

def validate_network_tag(config):
"""
Checks the presence of network tag
"""
logger.info("checking networks tag")
k8s_dict = config_utils.get_k8s_dict(config)
    validate_dict_data(k8s_dict, consts.NETWORKS_KEY)

def validate_default_network_params(config):
"""
Checks the presence of default network tag and its parameters
"""
logger.info("checking def networks params")
default_net = config_utils.get_default_network(config)
if not default_net:
raise ValidationException('Default network must be defined')
validate_dict_data(default_net, consts.NET_PLUGIN_KEY)
validate_dict_data(default_net, consts.MASTER_PLUGIN_KEY)
validate_dict_data(default_net, consts.SRVC_SUB_KEY)
validate_dict_data(default_net, consts.POD_SUB_KEY)
    validate_dict_data(default_net, consts.NETWORK_NAME_KEY)

def validate_multus_network_cni(config):
"""
Checks the presence of CNI tag in Multus network and also checks
presence of multus network tag
"""
logger.info("checking multus networks CNI ")
mult_nets = config_utils.get_multus_network(config)
for mult_net in mult_nets:
if consts.MULTUS_CNI_KEY in mult_net:
return
raise ValidationException(
        '{} config does not exist'.format(consts.MULTUS_CNI_KEY))

def validate_multus_network_cni_conf(config):
"""
Checks the presence of CNI Configuration tag in Multus network
and also checks presence of multus network tag
"""
logger.info("checking multus networks CNI CONF tag")
mult_nets = config_utils.get_multus_network(config)
for mult_net in mult_nets:
if consts.MULTUS_CNI_CONFIG_KEY in mult_net:
return
raise ValidationException('{} config does not exist'.format(
        consts.MULTUS_CNI_CONFIG_KEY))

def validate_cni_params(config):
"""
    Checks the presence of at least one plugin in the CNI tag
"""
logger.info("checking multus networks params")
net_configs = config_utils.get_networks(config)
cni_params = []
for all_keys in net_configs[1]:
for keys_in_all_keys in net_configs[1][all_keys]:
cni_params.append(keys_in_all_keys)
break
for cni_param in cni_params:
if cni_param.get(consts.MULTUS_CNI_KEY):
if consts.WEAVE_TYPE in cni_param.get(consts.MULTUS_CNI_KEY):
validate_multus_network_weave_params(config)
if consts.FLANNEL_TYPE in cni_param.get(consts.MULTUS_CNI_KEY):
validate_multus_network_flannel_params(config)
if consts.SRIOV_TYPE in cni_param.get(consts.MULTUS_CNI_KEY):
validate_multus_network_sriov_params(config)
if consts.MACVLAN_TYPE in cni_param.get(consts.MULTUS_CNI_KEY):
validate_multus_network_macvlan_params(config)
    validate_dhcpmandatory(config)

def validate_duplicatein_cni_and_networkplugin(config):
"""
Checks if there exists the same plugin in both default network
plugin tag and in Cni parameters
"""
logger.info("checking duplicate values")
net_configs = config_utils.get_networks(config)
networkpluginvalue = net_configs[0].values()[0][
consts.NET_PLUGIN_KEY]
net_elems = config_utils.get_multus_net_elems(config)
if (consts.WEAVE_TYPE in net_elems
and consts.WEAVE_TYPE == networkpluginvalue):
raise ValidationException("duplicate weave")
if (consts.FLANNEL_TYPE in net_elems
and consts.FLANNEL_TYPE == networkpluginvalue):
raise ValidationException("duplicate flannel")
if (consts.SRIOV_TYPE in net_elems
and consts.SRIOV_TYPE == networkpluginvalue):
raise ValidationException("duplicate Sriov")
if (consts.MACVLAN_TYPE in net_elems
and consts.MACVLAN_TYPE == networkpluginvalue):
raise ValidationException("duplicate macvlan") | def validate_duplicatein_cni_and_networkplugin(config):
"""
Checks if there exists the same plugin in both default network
plugin tag and in Cni parameters
"""
logger.info("checking duplicate values")
net_configs = config_utils.get_networks(config)
networkpluginvalue = net_configs[0].values()[0][
consts.NET_PLUGIN_KEY]
net_elems = config_utils.get_multus_net_elems(config)
if (consts.WEAVE_TYPE in net_elems
and consts.WEAVE_TYPE == networkpluginvalue):
raise ValidationException("duplicate weave")
if (consts.FLANNEL_TYPE in net_elems
and consts.FLANNEL_TYPE == networkpluginvalue):
raise ValidationException("duplicate flannel")
if (consts.SRIOV_TYPE in net_elems
and consts.SRIOV_TYPE == networkpluginvalue):
raise ValidationException("duplicate Sriov")
if (consts.MACVLAN_TYPE in net_elems
and consts.MACVLAN_TYPE == networkpluginvalue):
raise ValidationException("duplicate macvlan") |
Python | def validate_multus_network_flannel_params(config):
"""
Checks the presence of Flannel network parameters
"""
logger.info("checking flannelnet params")
flannel_nets = config_utils.get_multus_cni_flannel_cfgs(config)
if len(flannel_nets) == 0:
raise ValidationException('Requires at least one flannel network')
for flannel_net in flannel_nets:
validate_dict_data(flannel_net, consts.FLANNEL_NET_DTLS_KEY)
flannel_dtls = flannel_net[consts.FLANNEL_NET_DTLS_KEY]
validate_dict_data(flannel_dtls, consts.MASTER_PLUGIN_KEY)
validate_dict_data(flannel_dtls, consts.NETWORK_NAME_KEY)
validate_dict_data(flannel_dtls, consts.NETWORK_KEY)
        validate_dict_data(flannel_dtls, consts.SUBNET_KEY)

def validate_multus_network_macvlan_params(config):
"""
    Checks the presence of Macvlan parameters, checks the Macvlan
    network name format, and performs validations based on "type"
"""
logger.info("checking Macvlan params")
macvlan_nets = config_utils.get_multus_cni_macvlan_cfgs(config)
if len(macvlan_nets) == 0:
raise ValidationException('At least one macvlan network required')
for macvlan_net in macvlan_nets:
macvlan_conf = macvlan_net[consts.MACVLAN_NET_DTLS_KEY]
validate_dict_data(macvlan_conf, consts.MACVLAN_PARENT_INTF_KEY)
validate_dict_data(macvlan_conf, consts.HOSTNAME_KEY)
validate_dict_data(macvlan_conf, consts.IP_KEY)
validate_dict_data(macvlan_conf, consts.NODE_TYPE_MASTER)
validate_dict_data(macvlan_conf, consts.TYPE_KEY)
validate_dict_data(macvlan_conf, consts.NETWORK_NAME_KEY)
net_name = macvlan_conf[consts.NETWORK_NAME_KEY]
to_find = "_"
count = net_name.find(to_find)
count2 = len(filter(lambda x: x in string.uppercase, net_name))
if not (count < 1 and count2 < 1):
raise ValidationException("Network_name value format is wrong ")
if macvlan_conf[consts.TYPE_KEY] == consts.NET_TYPE_LOCAL_TYPE:
validate_dict_data(macvlan_conf, consts.RANGE_END_KEY)
validate_dict_data(macvlan_conf, consts.RANGE_START_KEY)
validate_dict_data(macvlan_conf, consts.ROUTES_DST_KEY)
validate_dict_data(macvlan_conf, consts.SUBNET_KEY)
            validate_dict_data(macvlan_conf, consts.GATEWAY_KEY)
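A small hypothetical helper, not part of the original module, that mirrors the network_name rule enforced above (lowercase only, no underscores):

def _macvlan_name_ok(net_name):
    # mirrors the validation above: reject underscores and uppercase letters
    return "_" not in net_name and not any(c.isupper() for c in net_name)

assert _macvlan_name_ok('macvlan-net1')
assert not _macvlan_name_ok('Macvlan_net1')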
Python | def validate_multus_network_sriov_params(config):
"""
    Checks the presence of SR-IOV parameters and performs "type"-specific
    validations
"""
logger.info("checking SRIOV params")
sriov_hosts = config_utils.get_multus_cni_sriov_cfgs(config)
for sriov_host in sriov_hosts:
validate_dict_data(sriov_host, consts.HOST_KEY)
host_data = sriov_host[consts.HOST_KEY]
validate_dict_data(host_data, consts.SRIOV_NETWORKS_KEY)
net_configs = host_data[consts.SRIOV_NETWORKS_KEY]
for net_config in net_configs:
validate_dict_data(net_config, consts.MASTER_PLUGIN_KEY)
validate_dict_data(net_config, consts.HOSTNAME_KEY)
validate_dict_data(net_config, consts.NETWORK_NAME_KEY)
validate_dict_data(net_config, consts.TYPE_KEY)
if net_config[consts.TYPE_KEY] == consts.NET_TYPE_LOCAL_TYPE:
validate_dict_data(net_config, consts.RANGE_START_KEY)
validate_dict_data(net_config, consts.RANGE_END_KEY)
validate_dict_data(net_config, consts.SRIOV_INTF_KEY)
validate_dict_data(net_config, consts.NETWORK_NAME_KEY)
validate_dict_data(net_config, consts.SRIOV_DPDK_ENABLE_KEY)
validate_dict_data(net_config, consts.SRIOV_GATEWAY_KEY)
                validate_dict_data(net_config, consts.SRIOV_SUBNET_KEY)
Python | def validate_multus_network_weave_params(config):
"""
Checks the presence of weave parameters
"""
logger.info("checking weave_params params")
weave_nets = config_utils.get_multus_cni_weave_cfgs(config)
for weave_net in weave_nets:
weave_details = weave_net[consts.WEAVE_NET_DTLS_KEY]
validate_dict_data(weave_details, consts.MASTER_PLUGIN_KEY)
validate_dict_data(weave_details, consts.NETWORK_NAME_KEY)
        validate_dict_data(weave_details, consts.SUBNET_KEY)
Python | def validate_ceph_vol_params(config):
"""
Checks the presence of Ceph Volume parameters
"""
logger.info("checking ceph_vol_params")
ceph_vols = config_utils.get_ceph_vol(config)
for ceph_vol in ceph_vols:
validate_dict_data(ceph_vol, consts.HOST_KEY)
ceph_host = ceph_vol[consts.HOST_KEY]
validate_dict_data(ceph_host, consts.IP_KEY)
validate_dict_data(ceph_host, consts.NODE_TYPE_KEY)
validate_dict_data(ceph_host, consts.PASSWORD_KEY)
        validate_dict_data(ceph_host, consts.USER_KEY)
Python | def validate_nodetype_data(config):
"""
    Checks that each Ceph volume host declares a valid node_type value
"""
logger.info("checking nodetype_data")
ceph_vol_hosts = config_utils.get_ceph_vol(config)
for ceph_vol_host in ceph_vol_hosts:
host_conf = ceph_vol_host[consts.HOST_KEY]
validate_dict_data(host_conf, consts.NODE_TYPE_KEY)
node_type = host_conf[consts.NODE_TYPE_KEY]
if (node_type != consts.CEPH_CTRL_TYPE
and node_type != consts.CEPH_OSD_TYPE):
raise ValidationException(
'Ceph node type is not {} or {}'.format(
                consts.CEPH_CTRL_TYPE, consts.CEPH_OSD_TYPE))
Python | def validate_ceph_controller_params(config):
"""
    Checks the presence of Ceph controller parameters for each Ceph claim
"""
logger.info("checking ceph_controller_params")
ceph_hosts = config_utils.get_ceph_vol(config)
for ceph_host in ceph_hosts:
ceph_host_data = ceph_host[consts.HOST_KEY]
if ceph_host_data[consts.NODE_TYPE_KEY] == consts.CEPH_CTRL_TYPE:
validate_dict_data(ceph_host_data, consts.CEPH_CLAIMS_KEY)
claims = ceph_host_data[consts.CEPH_CLAIMS_KEY]
for claim in claims:
claim_params = claim[consts.CLAIM_PARAMS_KEY]
validate_dict_data(claim_params, consts.CEPH_CLAIM_NAME_KEY)
validate_dict_data(claim_params, consts.CEPH_STORAGE_KEY)
else:
            validate_dict_data(ceph_host_data, consts.STORAGE_TYPE_KEY)
Python | def validate_ceph_osd_params(config):
"""
    Checks that Ceph OSD hosts define only secondary storage parameters
"""
logger.info("checking ceph_osd_params")
ceph_hosts = config_utils.get_ceph_vol(config)
for ceph_host in ceph_hosts:
ceph_host_data = ceph_host[consts.HOST_KEY]
if ceph_host_data[consts.NODE_TYPE_KEY] == consts.CEPH_OSD_TYPE:
if (consts.CEPH_CLAIMS_KEY not in ceph_host_data
and consts.STORAGE_TYPE_KEY in ceph_host_data):
                # this OSD host is valid; continue checking the remaining hosts
                continue
else:
raise ValidationException(
"for ceph_osd only secondary storage should be present") | def validate_ceph_osd_params(config):
"""
Checks the presence of Ceph osd parameters foe secondary storage presence
"""
logger.info("checking ceph_osd_params")
ceph_hosts = config_utils.get_ceph_vol(config)
for ceph_host in ceph_hosts:
ceph_host_data = ceph_host[consts.HOST_KEY]
if ceph_host_data[consts.NODE_TYPE_KEY] == consts.CEPH_OSD_TYPE:
if (consts.CEPH_CLAIMS_KEY not in ceph_host_data
and consts.STORAGE_TYPE_KEY in ceph_host_data):
return
else:
raise ValidationException(
"for ceph_osd only secondary storage should be present") |
Python | def validate_dhcpmandatory(config):
"""
    Checks that the DHCP CNI plugin is configured with Multus when any
    SR-IOV or Macvlan network uses dhcp as its network type
"""
logger.info("checking dhcp mandatory values")
has_dhcp = False
macvlan_nets = config_utils.get_multus_cni_macvlan_cfgs(config)
for macvlan_net in macvlan_nets:
macvlan_conf = macvlan_net[consts.MACVLAN_NET_DTLS_KEY]
if macvlan_conf[consts.TYPE_KEY] == consts.DHCP_TYPE:
has_dhcp = True
break
if not has_dhcp:
sriov_nets = config_utils.get_multus_cni_sriov_cfgs(config)
for sriov_net in sriov_nets:
sriov_conf = sriov_net[consts.HOST_KEY]
sriov_net_confs = sriov_conf[consts.SRIOV_NETWORKS_KEY]
for sriov_net_conf in sriov_net_confs:
if sriov_net_conf[consts.TYPE_KEY] == consts.DHCP_TYPE:
has_dhcp = True
break
if has_dhcp:
cni_types = config_utils.get_multus_net_elems(config)
if consts.DHCP_TYPE not in cni_types:
raise ValidationException(
                'DHCP must be in the {} list'.format(consts.MULTUS_CNI_KEY))
Python | def ismaster_count_for_deployment(config):
"""
    Checks that the master plugin flag is set to true no more than once
    across the deployment
"""
master_count = 0
dflt_net = config_utils.get_default_network(config)
if config_utils.bool_val(dflt_net[consts.MASTER_PLUGIN_KEY]):
master_count += 1
multus_nets = config_utils.get_multus_cni_cfgs(config)
for multus_net in multus_nets:
if consts.FLANNEL_NET_TYPE in multus_net:
networks = multus_net[consts.FLANNEL_NET_TYPE]
for network in networks:
validate_dict_data(network, consts.FLANNEL_NET_DTLS_KEY)
details = network[consts.FLANNEL_NET_DTLS_KEY]
if config_utils.bool_val(details[consts.MASTER_PLUGIN_KEY]):
master_count += 1
if consts.WEAVE_NET_TYPE in multus_net:
networks = multus_net[consts.WEAVE_NET_TYPE]
for network in networks:
validate_dict_data(network, consts.WEAVE_NET_DTLS_KEY)
details = network[consts.WEAVE_NET_DTLS_KEY]
if config_utils.bool_val(details[consts.MASTER_PLUGIN_KEY]):
master_count += 1
if consts.MACVLAN_NET_TYPE in multus_net:
networks = multus_net[consts.MACVLAN_NET_TYPE]
for network in networks:
validate_dict_data(network, consts.MACVLAN_NET_DTLS_KEY)
details = network[consts.MACVLAN_NET_DTLS_KEY]
if config_utils.bool_val(details[consts.MASTER_PLUGIN_KEY]):
master_count += 1
if consts.SRIOV_NET_TYPE in multus_net:
net_hosts = multus_net[consts.SRIOV_NET_TYPE]
for net_host in net_hosts:
validate_dict_data(net_host, consts.HOST_KEY)
host_conf = net_host[consts.HOST_KEY]
validate_dict_data(host_conf, consts.SRIOV_NETWORKS_KEY)
networks = host_conf[consts.SRIOV_NETWORKS_KEY]
for network in networks:
if config_utils.bool_val(network[
consts.MASTER_PLUGIN_KEY]):
master_count += 1
if master_count > 1:
        raise ValidationException('isMaster is present more than once')
Python | def is_multus_cni_enabled(k8s_conf):
"""
Returns the status of Multus CNI configuration
:param k8s_conf: the config dict
:return: a boolean
"""
sriov_cni = False
flannel_cni = False
weave_cni = False
macvlan_cni = False
multus_cni = get_multus_net_elems(k8s_conf)
for cni in multus_cni:
if consts.SRIOV_TYPE == cni:
sriov_cni = True
elif consts.FLANNEL_TYPE == cni:
flannel_cni = True
elif consts.WEAVE_TYPE == cni:
weave_cni = True
elif consts.MACVLAN_TYPE == cni:
macvlan_cni = True
    return sriov_cni or flannel_cni or weave_cni or macvlan_cni
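The same check can be expressed more compactly; a sketch under the assumption that the consts values and get_multus_net_elems behave as used above:

def is_multus_cni_enabled_alt(k8s_conf):
    # True when any recognised CNI type appears in the Multus element list
    cni_types = {consts.SRIOV_TYPE, consts.FLANNEL_TYPE,
                 consts.WEAVE_TYPE, consts.MACVLAN_TYPE}
    return any(cni in cni_types for cni in get_multus_net_elems(k8s_conf))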
Python | def is_rook_enabled(k8s_conf):
"""
Returns True if rook has PVs configured
:param k8s_conf: the configuration dict
:return: T/F
"""
    return len(get_rook_vols(k8s_conf)) > 0
Python | def is_logging_enabled(k8s_conf):
"""
Returns T/F based on the kubernetes.enable_logging value
:param k8s_conf: the configuration dict
:return: T/F
"""
value = get_k8s_dict(k8s_conf).get(consts.ENABLE_LOG_KEY, False)
    return bool_val(value)
Python | def is_cpu_alloc(k8s_conf):
"""
    Returns T/F based on the kubernetes CPU allocation value
    (consts.CPU_ALLOC_KEY)
:param k8s_conf: the configuration dict
:return: T/F
"""
value = get_k8s_dict(k8s_conf).get(consts.CPU_ALLOC_KEY, False)
    return bool_val(value)
Python | def is_metrics_server_enabled(k8s_conf):
"""
    Returns T/F based on the kubernetes metrics server value
    (consts.METRICS_SERVER_KEY)
:param k8s_conf: the configuration dict
:return: T/F
"""
value = get_k8s_dict(k8s_conf).get(consts.METRICS_SERVER_KEY, False)
    return bool_val(value)
Python | def is_helm_enabled(k8s_conf):
"""
    Returns T/F based on the kubernetes Helm enablement value
    (consts.HELM_ENABLED_KEY)
:param k8s_conf: the configuration dict
:return: T/F
"""
value = get_k8s_dict(k8s_conf).get(consts.HELM_ENABLED_KEY, False)
    return bool_val(value)
Python | def persist_config_to_file(config, conf_dir):
"""
Creates a YAML file from a dict
:param config: the dictionary to store
:param conf_dir: the directory used to store the config file
:return: the closed file object
"""
if not os.path.isdir(conf_dir):
os.mkdir(conf_dir)
relative_file_path = "{}/{}".format(conf_dir, str(uuid.uuid4()))
file_path = os.path.expanduser(relative_file_path)
yaml_from_dict = yaml.dump(config, Dumper=yaml.RoundTripDumper)
logger.info('Saving [%s] to file [%s]', yaml_from_dict, file_path)
    with open(file_path, 'w') as save_file:
        save_file.write(yaml_from_dict)
        logger.info('Closing file [%s]', save_file.name)
    return save_file
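A hedged usage sketch for persist_config_to_file (the directory path and config dict are illustrative assumptions, not values from this document):

import os

saved_file = persist_config_to_file(
    {'kubernetes': {'enable_logging': True}}, '/tmp/snaps-k8s-conf')
# the returned file object is already closed; only its name is used here
assert os.path.isfile(saved_file.name)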
Python | def __get_node_ips(boot_conf, hb_conf, node_type):
"""
Returns a list of IPs for all nodes of a given type
:param boot_conf: the snaps-boot configuration dict
:param hb_conf: the adrenaline configuration dict
:param node_type: a string denoting the node type ('master'|'minion')
:return: a list of IP addresses or an empty list
"""
out = list()
    node_names = hb_conf[node_type]
    for node_name in node_names:
        out.append(get_node_ip(boot_conf, node_name))
    return out
Python | def __generate_node_config(boot_conf, hb_conf):
"""
Generates the node configuration for snaps-kubernetes
:param boot_conf: the snaps-boot config dict
:param hb_conf: the adrenaline config dict
:return: list of dict containing the configuration of each kubernetes node
"""
out_list = list()
env = Environment(loader=FileSystemLoader(
searchpath=os.path.dirname(consts.K8S_DEPOY_NODE_CONFIG_TMPLT)))
template = env.get_template(
os.path.basename(consts.K8S_DEPOY_NODE_CONFIG_TMPLT))
boot_nodes = __get_boot_node_data(boot_conf, hb_conf)
for boot_node in boot_nodes:
host_dict = yaml.safe_load(template.render(**boot_node))
out_list.append({'host': host_dict})
    return out_list
Python | def __get_boot_node_data(boot_conf, hb_conf):
"""
Returns a list of dict objects containing the substitution variables
for each configured node
:param boot_conf: the snaps-boot config
:param hb_conf: the adrenaline config dict
:return: a list of dict for k8s configuration
"""
out_list = list()
boot_hosts = boot_conf['PROVISION']['STATIC']['host']
master_names = hb_conf['masters']
minion_names = hb_conf['minions']
node_passwd = get_node_pass(boot_conf)
for boot_host in boot_hosts:
port = None
node_type = None
if boot_host['name'] in master_names:
node_type = 'master'
port = consts.MASTER_CONFIG_PORT
if boot_host['name'] in minion_names:
node_type = 'minion'
port = consts.MINION_CONFIG_PORT
access_ip = None
boot_type = hb_conf.get('boot_intf_type')
if boot_type:
for interface in boot_host['interfaces']:
if interface['type'] == boot_type:
access_ip = interface['address']
else:
access_ip = boot_host['access_ip']
if not access_ip:
raise Exception('Unable to obtain an access IP')
if node_type:
out_list.append({
'hostname': boot_host['name'],
'node_ip': access_ip,
'registry_port': port,
'node_type': node_type,
'label_value': boot_host['name'],
'node_host_pass': node_passwd,
})
port += 1
    return out_list
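For reference, the shape of one element produced by __get_boot_node_data; the keys come from the code above, while the values shown are illustrative assumptions:

example_node = {
    'hostname': 'master01',
    'node_ip': '10.10.0.11',
    'registry_port': consts.MASTER_CONFIG_PORT,
    'node_type': 'master',
    'label_value': 'master01',
    'node_host_pass': 'changeme',
}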