# ---- zeroos/core0/client/client.py (package: 0-core-client) ----
import redis
import uuid
import json
import textwrap
import shlex
import base64
import signal
import socket
import logging
import time
from . import typchk
DefaultTimeout = 10 # seconds
logger = logging.getLogger('g8core')
class Timeout(Exception):
pass
class JobNotFound(Exception):
pass
class Return:
def __init__(self, payload):
self._payload = payload
@property
def payload(self):
return self._payload
@property
def id(self):
return self._payload['id']
@property
def data(self):
"""
        data returned by the process. Only available if the process
        outputs data with the correct core level
"""
return self._payload['data']
@property
def level(self):
"""data message level (if any)"""
return self._payload['level']
@property
def starttime(self):
"""timestamp"""
return self._payload['starttime'] / 1000
@property
def time(self):
"""execution time in millisecond"""
return self._payload['time']
@property
def state(self):
"""
exit state
"""
return self._payload['state']
@property
def stdout(self):
streams = self._payload.get('streams', None)
return streams[0] if streams is not None and len(streams) >= 1 else ''
@property
def stderr(self):
streams = self._payload.get('streams', None)
return streams[1] if streams is not None and len(streams) >= 2 else ''
def __repr__(self):
return str(self)
def __str__(self):
tmpl = """\
STATE: {state}
STDOUT:
{stdout}
STDERR:
{stderr}
DATA:
{data}
"""
return textwrap.dedent(tmpl).format(state=self.state, stdout=self.stdout, stderr=self.stderr, data=self.data)
class Response:
def __init__(self, client, id):
self._client = client
self._id = id
self._queue = 'result:{}'.format(id)
@property
def id(self):
return self._id
@property
def exists(self):
r = self._client._redis
flag = '{}:flag'.format(self._queue)
return r.rpoplpush(flag, flag) is not None
def get(self, timeout=None):
if timeout is None:
timeout = self._client.timeout
r = self._client._redis
start = time.time()
maxwait = timeout
while maxwait > 0:
if not self.exists:
raise JobNotFound(self.id)
v = r.brpoplpush(self._queue, self._queue, 10)
if v is not None:
payload = json.loads(v.decode())
r = Return(payload)
logger.debug('%s << %s, stdout="%s", stderr="%s", data="%s"',
self._id, r.state, r.stdout, r.stderr, r.data[:1000])
return r
logger.debug('%s still waiting (%ss)', self._id, int(time.time() - start))
maxwait -= 10
raise Timeout()
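# Usage sketch (illustrative, not part of the API): polling a command result.
# Assumes `cl` is a connected Client instance (defined later in this module)
# and that the node supports the command being sent.
#
#   response = cl.raw('core.ping', {})
#   result = response.get(timeout=30)   # blocks up to 30s, then raises Timeout
#   if result.state == 'SUCCESS':
#       print(result.data)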
class InfoManager:
def __init__(self, client):
self._client = client
def cpu(self):
return self._client.json('info.cpu', {})
def nic(self):
return self._client.json('info.nic', {})
def mem(self):
return self._client.json('info.mem', {})
def disk(self):
return self._client.json('info.disk', {})
def os(self):
return self._client.json('info.os', {})
def port(self):
return self._client.json('info.port', {})
def version(self):
return self._client.json('info.version', {})
class JobManager:
_job_chk = typchk.Checker({
'id': str,
})
_kill_chk = typchk.Checker({
'id': str,
'signal': int,
})
def __init__(self, client):
self._client = client
def list(self, id=None):
"""
List all running jobs
:param id: optional ID for the job to list
"""
args = {'id': id}
self._job_chk.check(args)
return self._client.json('job.list', args)
def kill(self, id, signal=signal.SIGTERM):
"""
Kill a job with given id
        :WARNING: beware of what you kill; if you kill redis, for example, core0 or coreX won't be reachable
:param id: job id to kill
"""
args = {
'id': id,
'signal': int(signal),
}
self._kill_chk.check(args)
return self._client.json('job.kill', args)
class ProcessManager:
_process_chk = typchk.Checker({
'pid': int,
})
_kill_chk = typchk.Checker({
'pid': int,
'signal': int,
})
def __init__(self, client):
self._client = client
def list(self, id=None):
"""
List all running processes
:param id: optional PID for the process to list
"""
args = {'pid': id}
self._process_chk.check(args)
return self._client.json('process.list', args)
def kill(self, pid, signal=signal.SIGTERM):
"""
Kill a process with given pid
        :WARNING: beware of what you kill; if you kill redis, for example, core0 or coreX won't be reachable
:param pid: PID to kill
"""
args = {
'pid': pid,
'signal': int(signal),
}
self._kill_chk.check(args)
return self._client.json('process.kill', args)
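# Usage sketch (illustrative): listing and killing jobs/processes. Assumes
# `cl` is a connected Client; the job id and pid values are made up.
#
#   jobs = cl.job.list()
#   cl.job.kill('some-job-id', signal.SIGKILL)
#   procs = cl.process.list()
#   cl.process.kill(1234, signal.SIGTERM)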
class FilesystemManager:
def __init__(self, client):
self._client = client
def open(self, file, mode='r', perm=0o0644):
"""
Opens a file on the node
:param file: file path to open
:param mode: open mode
        :param perm: file permission in octal form
mode:
'r' read only
'w' write only (truncate)
'+' read/write
'x' create if not exist
'a' append
:return: a file descriptor
"""
args = {
'file': file,
'mode': mode,
'perm': perm,
}
return self._client.json('filesystem.open', args)
def exists(self, path):
"""
Check if path exists
:param path: path to file/dir
:return: boolean
"""
args = {
'path': path,
}
return self._client.json('filesystem.exists', args)
def list(self, path):
"""
List all entries in directory
:param path: path to dir
        :return: list of directory entries
"""
args = {
'path': path,
}
return self._client.json('filesystem.list', args)
def mkdir(self, path):
"""
Make a new directory == mkdir -p path
:param path: path to directory to create
:return:
"""
args = {
'path': path,
}
return self._client.json('filesystem.mkdir', args)
def remove(self, path):
"""
Removes a path (recursively)
:param path: path to remove
:return:
"""
args = {
'path': path,
}
return self._client.json('filesystem.remove', args)
def move(self, path, destination):
"""
Move a path to destination
:param path: source
:param destination: destination
:return:
"""
args = {
'path': path,
'destination': destination,
}
return self._client.json('filesystem.move', args)
def chmod(self, path, mode, recursive=False):
"""
Change file/dir permission
:param path: path of file/dir to change
        :param mode: octal mode
:param recursive: apply chmod recursively
:return:
"""
args = {
'path': path,
'mode': mode,
'recursive': recursive,
}
return self._client.json('filesystem.chmod', args)
def chown(self, path, user, group, recursive=False):
"""
Change file/dir owner
:param path: path of file/dir
:param user: user name
:param group: group name
:param recursive: apply chown recursively
:return:
"""
args = {
'path': path,
'user': user,
'group': group,
'recursive': recursive,
}
return self._client.json('filesystem.chown', args)
def read(self, fd):
"""
Read a block from the given file descriptor
:param fd: file descriptor
:return: bytes
"""
args = {
'fd': fd,
}
data = self._client.json('filesystem.read', args)
return base64.decodebytes(data.encode())
def write(self, fd, bytes):
"""
        Write a block of bytes to an open file descriptor (one that was opened with one of the write modes)
        :param fd: file descriptor
        :param bytes: bytes block to write
        :return:
        :note: don't overload the node with large byte chunks; for large file uploads, check the upload method.
"""
args = {
'fd': fd,
'block': base64.encodebytes(bytes).decode(),
}
return self._client.json('filesystem.write', args)
def close(self, fd):
"""
Close file
:param fd: file descriptor
:return:
"""
args = {
'fd': fd,
}
return self._client.json('filesystem.close', args)
def upload(self, remote, reader):
"""
Uploads a file
:param remote: remote file name
:param reader: an object that implements the read(size) method (typically a file descriptor)
:return:
"""
fd = self.open(remote, 'w')
while True:
chunk = reader.read(512 * 1024)
if chunk == b'':
break
self.write(fd, chunk)
self.close(fd)
def download(self, remote, writer):
"""
Downloads a file
:param remote: remote file name
        :param writer: an object that implements the write(bytes) interface (typically a file descriptor)
:return:
"""
fd = self.open(remote)
while True:
chunk = self.read(fd)
if chunk == b'':
break
writer.write(chunk)
self.close(fd)
def upload_file(self, remote, local):
"""
Uploads a file
:param remote: remote file name
:param local: local file name
:return:
"""
        with open(local, 'rb') as file:
            self.upload(remote, file)
def download_file(self, remote, local):
"""
Downloads a file
:param remote: remote file name
:param local: local file name
:return:
"""
        with open(local, 'wb') as file:
            self.download(remote, file)
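# Usage sketch (illustrative): remote file manipulation. Assumes `cl` is a
# connected Client; all paths are examples.
#
#   cl.filesystem.mkdir('/tmp/demo')
#   fd = cl.filesystem.open('/tmp/demo/hello.txt', 'w')
#   cl.filesystem.write(fd, b'hello world\n')
#   cl.filesystem.close(fd)
#   cl.filesystem.download_file('/tmp/demo/hello.txt', '/local/hello.txt')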
class BaseClient:
_system_chk = typchk.Checker({
'name': str,
'args': [str],
'dir': str,
'stdin': str,
'env': typchk.Or(typchk.Map(str, str), typchk.IsNone()),
})
_bash_chk = typchk.Checker({
'stdin': str,
'script': str,
})
def __init__(self, timeout=None):
if timeout is None:
self.timeout = DefaultTimeout
else:
self.timeout = timeout
self._info = InfoManager(self)
self._job = JobManager(self)
self._process = ProcessManager(self)
self._filesystem = FilesystemManager(self)
self._ip = IPManager(self)
@property
def info(self):
return self._info
@property
def job(self):
return self._job
@property
def process(self):
return self._process
@property
def filesystem(self):
return self._filesystem
@property
def ip(self):
return self._ip
def raw(self, command, arguments, queue=None, max_time=None):
"""
        Implements the low-level command call; this builds the command structure
        and pushes it onto the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:return: Response object
"""
        raise NotImplementedError()
def sync(self, command, arguments):
"""
        Same as self.raw except it does a response.get(), waiting for the command execution to finish, and reads the result
:return: Result object
"""
response = self.raw(command, arguments)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('invalid response: %s' % result.state, result)
return result
def json(self, command, arguments):
"""
Same as self.sync except it assumes the returned result is json, and loads the payload of the return object
:Return: Data
"""
result = self.sync(command, arguments)
if result.level != 20:
raise RuntimeError('invalid result level, expecting json(20) got (%d)' % result.level)
return json.loads(result.data)
def ping(self):
"""
        Ping a node, checking for its availability. A ping should never fail unless the node is not reachable
or not responsive.
:return:
"""
response = self.raw('core.ping', {})
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('invalid response: %s' % result.state)
return json.loads(result.data)
def system(self, command, dir='', stdin='', env=None):
"""
Execute a command
:param command: command to execute (with its arguments) ex: `ls -l /root`
:param dir: CWD of command
:param stdin: Stdin data to feed to the command stdin
:param env: dict with ENV variables that will be exported to the command
:return:
"""
parts = shlex.split(command)
if len(parts) == 0:
raise ValueError('invalid command')
args = {
'name': parts[0],
'args': parts[1:],
'dir': dir,
'stdin': stdin,
'env': env,
}
self._system_chk.check(args)
response = self.raw(command='core.system', arguments=args)
return response
def bash(self, script, stdin=''):
"""
Execute a bash script, or run a process inside a bash shell.
:param script: Script to execute (can be multiline script)
:param stdin: Stdin data to feed to the script
:return:
"""
args = {
'script': script,
'stdin': stdin,
}
self._bash_chk.check(args)
response = self.raw(command='bash', arguments=args)
return response
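# Usage sketch (illustrative): running commands through a BaseClient
# implementation such as Client below. Assumes a reachable node.
#
#   response = cl.system('ls -l /root')   # returns immediately with a Response
#   print(response.get().stdout)
#   result = cl.bash('echo $HOME; uname -a').get()
#   print(result.stdout)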
class ContainerClient(BaseClient):
class ContainerZerotierManager:
def __init__(self, client, container):
self._container = container
self._client = client
def info(self):
return self._client.json('corex.zerotier.info', {'container': self._container})
def list(self):
return self._client.json('corex.zerotier.list', {'container': self._container})
_raw_chk = typchk.Checker({
'container': int,
'command': {
'command': str,
'arguments': typchk.Any(),
'queue': typchk.Or(str, typchk.IsNone()),
'max_time': typchk.Or(int, typchk.IsNone()),
}
})
def __init__(self, client, container):
super().__init__(client.timeout)
self._client = client
self._container = container
self._zerotier = ContainerClient.ContainerZerotierManager(client, container) # not (self) we use core0 client
@property
def container(self):
return self._container
@property
def zerotier(self):
return self._zerotier
def raw(self, command, arguments, queue=None, max_time=None):
"""
        Implements the low-level command call; this builds the command structure
        and pushes it onto the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:return: Response object
"""
args = {
'container': self._container,
'command': {
'command': command,
'arguments': arguments,
'queue': queue,
'max_time': max_time,
},
}
# check input
self._raw_chk.check(args)
response = self._client.raw('corex.dispatch', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to dispatch command to container: %s' % result.data)
cmd_id = json.loads(result.data)
return self._client.response_for(cmd_id)
class ContainerManager:
_create_chk = typchk.Checker({
'root': str,
'mount': typchk.Or(
typchk.Map(str, str),
typchk.IsNone()
),
'host_network': bool,
'nics': [{
'type': typchk.Enum('default', 'bridge', 'zerotier', 'vlan', 'vxlan'),
'id': typchk.Or(str, typchk.Missing()),
'name': typchk.Or(str, typchk.Missing()),
'hwaddr': typchk.Or(str, typchk.Missing()),
'config': typchk.Or(
typchk.Missing,
{
'dhcp': typchk.Or(bool, typchk.Missing()),
'cidr': typchk.Or(str, typchk.Missing()),
'gateway': typchk.Or(str, typchk.Missing()),
'dns': typchk.Or([str], typchk.Missing()),
}
)
}],
'port': typchk.Or(
typchk.Map(int, int),
typchk.IsNone()
),
'privileged': bool,
'hostname': typchk.Or(
str,
typchk.IsNone()
),
'storage': typchk.Or(str, typchk.IsNone()),
'tags': typchk.Or([str], typchk.IsNone())
})
_terminate_chk = typchk.Checker({
'container': int
})
DefaultNetworking = object()
def __init__(self, client):
self._client = client
def create(self, root_url, mount=None, host_network=False, nics=DefaultNetworking, port=None, hostname=None, privileged=True, storage=None, tags=None):
"""
        Create a new container with the given root flist, mount points and
zerotier id, and connected to the given bridges
:param root_url: The root filesystem flist
:param mount: a dict with {host_source: container_target} mount points.
                      where host_source directory must exist.
                      host_source can be a url to a flist to mount.
        :param host_network: Specify if the container should share the same network stack as the host.
                             If True, container creation ignores the zerotier, bridge, and port arguments
                             below, without raising errors if they are provided.
:param nics: Configure the attached nics to the container
each nic object is a dict of the format
{
'type': nic_type # default, bridge, zerotier, vlan, or vxlan (note, vlan and vxlan only supported by ovs)
'id': id # depends on the type, bridge name, zerotier network id, the vlan tag or the vxlan id
'name': name of the nic inside the container (ignored in zerotier type)
'hwaddr': Mac address of nic.
'config': { # config is only honored for bridge, vlan, and vxlan types
'dhcp': bool,
'cidr': static_ip # ip/mask
'gateway': gateway
'dns': [dns]
}
}
:param port: A dict of host_port: container_port pairs (only if default networking is enabled)
Example:
`port={8080: 80, 7000:7000}`
:param hostname: Specific hostname you want to give to the container.
if None it will automatically be set to core-x,
                         x being the ID of the container
:param privileged: If true, container runs in privileged mode.
:param storage: A Url to the ardb storage to use to mount the root flist (or any other mount that requires g8fs)
if not provided, the default one from core0 configuration will be used.
"""
if nics == self.DefaultNetworking:
nics = [{'type': 'default'}]
elif nics is None:
nics = []
args = {
'root': root_url,
'mount': mount,
'host_network': host_network,
'nics': nics,
'port': port,
'hostname': hostname,
'privileged': privileged,
'storage': storage,
'tags': tags,
}
# validate input
self._create_chk.check(args)
response = self._client.raw('corex.create', args)
return response
def list(self):
"""
List running containers
:return: a dict with {container_id: <container info object>}
"""
return self._client.json('corex.list', {})
def find(self, *tags):
"""
        Find containers that match a set of tags
:param tags:
:return:
"""
tags = list(map(str, tags))
return self._client.json('corex.find', {'tags': tags})
def terminate(self, container):
"""
        Terminate a container given its id
:param container: container id
:return:
"""
args = {
'container': container,
}
self._terminate_chk.check(args)
response = self._client.raw('corex.terminate', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to terminate container: %s' % result.data)
def client(self, container):
"""
Return a client instance that is bound to that container.
:param container: container id
:return: Client object bound to the specified container id
"""
return ContainerClient(self._client, container)
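# Usage sketch (illustrative): creating a container and talking to it. The
# flist URL is an assumption, not a known-good image.
#
#   response = cl.container.create('https://hub.example.com/my.flist',
#                                  port={8080: 80})
#   container_id = json.loads(response.get().data)
#   container = cl.container.client(container_id)
#   print(container.system('hostname').get().stdout)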
class IPManager:
class IPBridgeManager:
def __init__(self, client):
self._client = client
def add(self, name, hwaddr=None):
args = {
'name': name,
'hwaddr': hwaddr,
}
return self._client.json("ip.bridge.add", args)
def delete(self, name):
args = {
'name': name,
}
return self._client.json("ip.bridge.del", args)
def addif(self, name, inf):
args = {
'name': name,
'inf': inf,
}
return self._client.json('ip.bridge.addif', args)
def delif(self, name, inf):
args = {
'name': name,
'inf': inf,
}
return self._client.json('ip.bridge.delif', args)
class IPLinkManager:
def __init__(self, client):
self._client = client
def up(self, link):
args = {
'name': link,
}
return self._client.json('ip.link.up', args)
def down(self, link):
args = {
'name': link,
}
return self._client.json('ip.link.down', args)
def name(self, link, name):
args = {
'name': link,
'new': name,
}
return self._client.json('ip.link.name', args)
def list(self):
return self._client.json('ip.link.list', {})
class IPAddrManager:
def __init__(self, client):
self._client = client
def add(self, link, ip):
args = {
'name': link,
'ip': ip,
}
return self._client.json('ip.addr.add', args)
def delete(self, link, ip):
args = {
'name': link,
'ip': ip,
}
return self._client.json('ip.addr.del', args)
def list(self, link):
args = {
'name': link,
}
return self._client.json('ip.addr.list', args)
class IPRouteManager:
def __init__(self, client):
self._client = client
def add(self, dev, dst, gw=None):
args = {
'dev': dev,
'dst': dst,
'gw': gw,
}
return self._client.json('ip.route.add', args)
def delete(self, dev, dst, gw=None):
args = {
'dev': dev,
'dst': dst,
'gw': gw,
}
return self._client.json('ip.route.del', args)
def list(self):
return self._client.json('ip.route.list', {})
def __init__(self, client):
self._client = client
self._bridge = IPManager.IPBridgeManager(client)
self._link = IPManager.IPLinkManager(client)
self._addr = IPManager.IPAddrManager(client)
self._route = IPManager.IPRouteManager(client)
@property
def bridge(self):
return self._bridge
@property
def link(self):
return self._link
@property
def addr(self):
return self._addr
@property
def route(self):
return self._route
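# Usage sketch (illustrative): low-level ip manipulation. Interface names and
# addresses are examples.
#
#   cl.ip.bridge.add('br0')
#   cl.ip.link.up('br0')
#   cl.ip.addr.add('br0', '10.20.30.1/24')
#   cl.ip.route.add('br0', '10.20.40.0/24', gw='10.20.30.254')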
class BridgeManager:
_bridge_create_chk = typchk.Checker({
'name': str,
'hwaddr': str,
'network': {
'mode': typchk.Or(typchk.Enum('static', 'dnsmasq'), typchk.IsNone()),
'nat': bool,
'settings': typchk.Map(str, str),
}
})
_bridge_delete_chk = typchk.Checker({
'name': str,
})
def __init__(self, client):
self._client = client
def create(self, name, hwaddr=None, network=None, nat=False, settings={}):
"""
Create a bridge with the given name, hwaddr and networking setup
:param name: name of the bridge (must be unique), 15 characters or less, and not equal to "default".
        :param hwaddr: MAC address of the bridge. If None, one will be generated for you
:param network: Networking mode, options are none, static, and dnsmasq
:param nat: If true, SNAT will be enabled on this bridge. (IF and ONLY IF an IP is set on the bridge
via the settings, otherwise flag will be ignored) (the cidr attribute of either static, or dnsmasq modes)
:param settings: Networking setting, depending on the selected mode.
none:
no settings, bridge won't get any ip settings
static:
settings={'cidr': 'ip/net'}
bridge will get assigned the given IP address
dnsmasq:
settings={'cidr': 'ip/net', 'start': 'ip', 'end': 'ip'}
bridge will get assigned the ip in cidr
                         and each running container that is attached to this bridge will get
                         an IP from the start/end range. Netmask of the range is the netmask
part of the provided cidr.
if nat is true, SNAT rules will be automatically added in the firewall.
"""
args = {
'name': name,
'hwaddr': hwaddr,
'network': {
'mode': network,
'nat': nat,
'settings': settings,
}
}
self._bridge_create_chk.check(args)
response = self._client.raw('bridge.create', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to create bridge %s' % result.data)
return json.loads(result.data)
def list(self):
"""
List all available bridges
:return: list of bridge names
"""
response = self._client.raw('bridge.list', {})
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to list bridges: %s' % result.data)
return json.loads(result.data)
def delete(self, bridge):
"""
Delete a bridge by name
:param bridge: bridge name
:return:
"""
args = {
'name': bridge,
}
self._bridge_delete_chk.check(args)
response = self._client.raw('bridge.delete', args)
result = response.get()
if result.state != 'SUCCESS':
            raise RuntimeError('failed to delete bridge: %s' % result.data)
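# Usage sketch (illustrative): a NAT-ed dnsmasq bridge. All values are
# examples.
#
#   cl.bridge.create('demo0', network='dnsmasq', nat=True, settings={
#       'cidr': '192.168.66.1/24',
#       'start': '192.168.66.100',
#       'end': '192.168.66.200',
#   })
#   print(cl.bridge.list())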
class DiskManager:
_mktable_chk = typchk.Checker({
'disk': str,
'table_type': typchk.Enum('aix', 'amiga', 'bsd', 'dvh', 'gpt', 'mac', 'msdos', 'pc98', 'sun', 'loop')
})
_mkpart_chk = typchk.Checker({
'disk': str,
'start': typchk.Or(int, str),
'end': typchk.Or(int, str),
'part_type': typchk.Enum('primary', 'logical', 'extended'),
})
_getpart_chk = typchk.Checker({
'disk': str,
'part': str,
})
_rmpart_chk = typchk.Checker({
'disk': str,
'number': int,
})
_mount_chk = typchk.Checker({
'options': str,
'source': str,
'target': str,
})
_umount_chk = typchk.Checker({
'source': str,
})
def __init__(self, client):
self._client = client
def list(self):
"""
List available block devices
"""
response = self._client.raw('disk.list', {})
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to list disks: %s' % result.stderr)
if result.level != 20: # 20 is JSON output.
raise RuntimeError('invalid response type from disk.list command')
data = result.data.strip()
if data:
return json.loads(data)
else:
return {}
def mktable(self, disk, table_type='gpt'):
"""
Make partition table on block device.
:param disk: device name (sda, sdb, etc...)
:param table_type: Partition table type as accepted by parted
"""
args = {
'disk': disk,
'table_type': table_type,
}
self._mktable_chk.check(args)
response = self._client.raw('disk.mktable', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to create table: %s' % result.stderr)
def getinfo(self, disk, part=''):
"""
Get more info about a disk or a disk partition
:param disk: (sda, sdb, etc..)
:param part: (sda1, sdb2, etc...)
        :return: a dict with "blocksize", "start", "size", and "free" sections
"""
args = {
"disk": disk,
"part": part,
}
self._getpart_chk.check(args)
response = self._client.raw('disk.getinfo', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to get info: %s' % result.data)
if result.level != 20: # 20 is JSON output.
raise RuntimeError('invalid response type from disk.getinfo command')
data = result.data.strip()
if data:
return json.loads(data)
else:
return {}
def mkpart(self, disk, start, end, part_type='primary'):
"""
Make partition on disk
:param disk: device name (sda, sdb, etc...)
:param start: partition start as accepted by parted mkpart
:param end: partition end as accepted by parted mkpart
:param part_type: partition type as accepted by parted mkpart
"""
args = {
'disk': disk,
'start': start,
'end': end,
'part_type': part_type,
}
self._mkpart_chk.check(args)
response = self._client.raw('disk.mkpart', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to create partition: %s' % result.stderr)
def rmpart(self, disk, number):
"""
        Remove partition from disk
:param disk: device name (sda, sdb, etc...)
:param number: Partition number (starting from 1)
"""
args = {
'disk': disk,
'number': number,
}
self._rmpart_chk.check(args)
response = self._client.raw('disk.rmpart', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to remove partition: %s' % result.stderr)
def mount(self, source, target, options=[]):
"""
        Mount partition on target
:param source: Full partition path like /dev/sda1
:param target: Mount point
:param options: Optional mount options
"""
if len(options) == 0:
options = ['']
args = {
'options': ','.join(options),
'source': source,
'target': target,
}
self._mount_chk.check(args)
response = self._client.raw('disk.mount', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to mount partition: %s' % result.stderr)
def umount(self, source):
"""
        Unmount partition
:param source: Full partition path like /dev/sda1
"""
args = {
'source': source,
}
self._umount_chk.check(args)
response = self._client.raw('disk.umount', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to umount partition: %s' % result.stderr)
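# Usage sketch (illustrative): partitioning and mounting a disk. The device
# name is an example and the operations are destructive on a real node.
#
#   cl.disk.mktable('sdb', 'gpt')
#   cl.disk.mkpart('sdb', '1MiB', '100%')
#   cl.disk.mount('/dev/sdb1', '/mnt/data', options=['noatime'])
#   print(cl.disk.getinfo('sdb', 'sdb1'))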
class BtrfsManager:
_create_chk = typchk.Checker({
'label': str,
'metadata': typchk.Enum("raid0", "raid1", "raid5", "raid6", "raid10", "dup", "single", ""),
'data': typchk.Enum("raid0", "raid1", "raid5", "raid6", "raid10", "dup", "single", ""),
'devices': [str],
'overwrite': bool,
})
_device_chk = typchk.Checker({
'mountpoint': str,
'devices': (str,),
})
_subvol_chk = typchk.Checker({
'path': str,
})
_subvol_quota_chk = typchk.Checker({
'path': str,
'limit': str,
})
_subvol_snapshot_chk = typchk.Checker({
'source': str,
'destination': str,
'read_only': bool,
})
def __init__(self, client):
self._client = client
def list(self):
"""
        List all btrfs filesystems
"""
return self._client.json('btrfs.list', {})
def info(self, mountpoint):
"""
Get btrfs fs info
"""
return self._client.json('btrfs.info', {'mountpoint': mountpoint})
def create(self, label, devices, metadata_profile="", data_profile="", overwrite=False):
"""
Create a btrfs filesystem with the given label, devices, and profiles
:param label: name/label
:param devices : array of devices (/dev/sda1, etc...)
        :param metadata_profile: raid0, raid1, raid5, raid6, raid10, dup or single
        :param data_profile: same as metadata profile
        :param overwrite: force creation of the filesystem. Overwrite any existing filesystem
"""
args = {
'label': label,
'metadata': metadata_profile,
'data': data_profile,
'devices': devices,
'overwrite': overwrite
}
self._create_chk.check(args)
self._client.sync('btrfs.create', args)
def device_add(self, mountpoint, *device):
"""
Add one or more devices to btrfs filesystem mounted under `mountpoint`
:param mountpoint: mount point of the btrfs system
        :param device: one or more devices to add
:return:
"""
if len(device) == 0:
return
args = {
'mountpoint': mountpoint,
'devices': device,
}
self._device_chk.check(args)
self._client.sync('btrfs.device_add', args)
def device_remove(self, mountpoint, *device):
"""
Remove one or more devices from btrfs filesystem mounted under `mountpoint`
:param mountpoint: mount point of the btrfs system
        :param device: one or more devices to remove
:return:
"""
if len(device) == 0:
return
args = {
'mountpoint': mountpoint,
'devices': device,
}
self._device_chk.check(args)
self._client.sync('btrfs.device_remove', args)
def subvol_create(self, path):
"""
Create a btrfs subvolume in the specified path
:param path: path to create
"""
args = {
'path': path
}
self._subvol_chk.check(args)
self._client.sync('btrfs.subvol_create', args)
def subvol_list(self, path):
"""
List a btrfs subvolume in the specified path
:param path: path to be listed
"""
return self._client.json('btrfs.subvol_list', {
'path': path
})
def subvol_delete(self, path):
"""
Delete a btrfs subvolume in the specified path
:param path: path to delete
"""
args = {
'path': path
}
self._subvol_chk.check(args)
self._client.sync('btrfs.subvol_delete', args)
def subvol_quota(self, path, limit):
"""
Apply a quota to a btrfs subvolume in the specified path
:param path: path to apply the quota for (it has to be the path of the subvol)
        :param limit: the limit to apply
"""
args = {
'path': path,
'limit': limit,
}
self._subvol_quota_chk.check(args)
self._client.sync('btrfs.subvol_quota', args)
def subvol_snapshot(self, source, destination, read_only=False):
"""
Take a snapshot
:param source: source path of subvol
:param destination: destination path of snapshot
:param read_only: Set read-only on the snapshot
:return:
"""
args = {
"source": source,
"destination": destination,
"read_only": read_only,
}
self._subvol_snapshot_chk.check(args)
self._client.sync('btrfs.subvol_snapshot', args)
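# Usage sketch (illustrative): a raid1 btrfs filesystem with a quota'd
# subvolume. Device paths and the mountpoint are examples.
#
#   cl.btrfs.create('data', ['/dev/sdb1', '/dev/sdc1'],
#                   metadata_profile='raid1', data_profile='raid1')
#   cl.btrfs.subvol_create('/mnt/data/vol1')
#   cl.btrfs.subvol_quota('/mnt/data/vol1', '10G')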
class ZerotierManager:
_network_chk = typchk.Checker({
'network': str,
})
def __init__(self, client):
self._client = client
def join(self, network):
"""
Join a zerotier network
:param network: network id to join
:return:
"""
args = {'network': network}
self._network_chk.check(args)
response = self._client.raw('zerotier.join', args)
result = response.get()
if result.state != 'SUCCESS':
            raise RuntimeError('failed to join zerotier network: %s' % result.stderr)
def leave(self, network):
"""
Leave a zerotier network
:param network: network id to leave
:return:
"""
args = {'network': network}
self._network_chk.check(args)
response = self._client.raw('zerotier.leave', args)
result = response.get()
if result.state != 'SUCCESS':
            raise RuntimeError('failed to leave zerotier network: %s' % result.stderr)
def list(self):
"""
List joined zerotier networks
:return: list of joined networks with their info
"""
return self._client.json('zerotier.list', {})
def info(self):
"""
Display zerotier status info
:return: dict of zerotier statusinfo
"""
return self._client.json('zerotier.info', {})
class KvmManager:
_iotune_dict = {
'totalbytessecset': bool,
'totalbytessec': int,
'readbytessecset': bool,
'readbytessec': int,
'writebytessecset': bool,
'writebytessec': int,
'totaliopssecset': bool,
'totaliopssec': int,
'readiopssecset': bool,
'readiopssec': int,
'writeiopssecset': bool,
'writeiopssec': int,
'totalbytessecmaxset': bool,
'totalbytessecmax': int,
'readbytessecmaxset': bool,
'readbytessecmax': int,
'writebytessecmaxset': bool,
'writebytessecmax': int,
'totaliopssecmaxset': bool,
'totaliopssecmax': int,
'readiopssecmaxset': bool,
'readiopssecmax': int,
'writeiopssecmaxset': bool,
'writeiopssecmax': int,
'totalbytessecmaxlengthset': bool,
'totalbytessecmaxlength': int,
'readbytessecmaxlengthset': bool,
'readbytessecmaxlength': int,
'writebytessecmaxlengthset': bool,
'writebytessecmaxlength': int,
'totaliopssecmaxlengthset': bool,
'totaliopssecmaxlength': int,
'readiopssecmaxlengthset': bool,
'readiopssecmaxlength': int,
'writeiopssecmaxlengthset': bool,
'writeiopssecmaxlength': int,
'sizeiopssecset': bool,
'sizeiopssec': int,
'groupnameset': bool,
'groupname': str,
}
_media_dict = {
'type': typchk.Or(
typchk.Enum('disk', 'cdrom'),
typchk.Missing()
),
'url': str,
'iotune': typchk.Or(
_iotune_dict,
typchk.Missing()
)
}
_create_chk = typchk.Checker({
'name': str,
'media': typchk.Length([_media_dict], 1),
'cpu': int,
'memory': int,
'nics': [{
'type': typchk.Enum('default', 'bridge', 'vxlan', 'vlan'),
'id': typchk.Or(str, typchk.Missing()),
'hwaddr': typchk.Or(str, typchk.Missing()),
}],
'port': typchk.Or(
typchk.Map(int, int),
typchk.IsNone()
),
})
_domain_action_chk = typchk.Checker({
'uuid': str,
})
_man_disk_action_chk = typchk.Checker({
'uuid': str,
'media': _media_dict,
})
_man_nic_action_chk = typchk.Checker({
'uuid': str,
'type': typchk.Enum('default', 'bridge', 'vxlan', 'vlan'),
'id': typchk.Or(str, typchk.Missing()),
'hwaddr': typchk.Or(str, typchk.Missing()),
})
_migrate_action_chk = typchk.Checker({
'uuid': str,
'desturi': str,
})
_limit_disk_io_dict = {
'uuid': str,
'media': _media_dict,
}
_limit_disk_io_dict.update(_iotune_dict)
_limit_disk_io_action_chk = typchk.Checker(_limit_disk_io_dict)
def __init__(self, client):
self._client = client
def create(self, name, media, cpu=2, memory=512, nics=None, port=None):
"""
:param name: Name of the kvm domain
:param media: array of media objects to attach to the machine, where the first object is the boot device
each media object is a dict of {url, type} where type can be one of 'disk', or 'cdrom', or empty (default to disk)
                      example: [{'url': 'nbd+unix:///test?socket=/tmp/ndb.socket'}, {'type': 'cdrom', 'url': '/somefile.iso'}]
:param cpu: number of vcpu cores
:param memory: memory in MiB
:param port: A dict of host_port: container_port pairs
Example:
`port={8080: 80, 7000:7000}`
Only supported if default network is used
:param nics: Configure the attached nics to the container
each nic object is a dict of the format
{
'type': nic_type # default, bridge, vlan, or vxlan (note, vlan and vxlan only supported by ovs)
                        'id': id # depends on the type: bridge name (bridge type), zerotier network id (zerotier type), the vlan tag or the vxlan id
}
:return: uuid of the virtual machine
"""
if nics is None:
nics = []
args = {
'name': name,
'media': media,
'cpu': cpu,
'memory': memory,
'nics': nics,
'port': port,
}
self._create_chk.check(args)
return self._client.sync('kvm.create', args)
def destroy(self, uuid):
"""
Destroy a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.destroy', args)
def shutdown(self, uuid):
"""
Shutdown a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.shutdown', args)
def reboot(self, uuid):
"""
Reboot a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.reboot', args)
def reset(self, uuid):
"""
Reset (Force reboot) a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.reset', args)
def pause(self, uuid):
"""
Pause a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.pause', args)
def resume(self, uuid):
"""
Resume a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
self._client.sync('kvm.resume', args)
def info(self, uuid):
"""
Get info about a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
return self._client.json('kvm.info', args)
def infops(self, uuid):
"""
Get info per second about a kvm domain by uuid
        :param uuid: uuid of the kvm domain (same as the one used in create)
:return:
"""
args = {
'uuid': uuid,
}
self._domain_action_chk.check(args)
return self._client.json('kvm.infops', args)
def attach_disk(self, uuid, media):
"""
Attach a disk to a machine
        :param uuid: uuid of the kvm domain (same as the one used in create)
:param media: the media object to attach to the machine
media object is a dict of {url, and type} where type can be one of 'disk', or 'cdrom', or empty (default to disk)
                 examples: {'url': 'nbd+unix:///test?socket=/tmp/ndb.socket'}, {'type': 'cdrom', 'url': '/somefile.iso'}
:return:
"""
args = {
'uuid': uuid,
'media': media,
}
self._man_disk_action_chk.check(args)
self._client.sync('kvm.attach_disk', args)
def detach_disk(self, uuid, media):
"""
Detach a disk from a machine
        :param uuid: uuid of the kvm domain (same as the one used in create)
:param media: the media object to attach to the machine
media object is a dict of {url, and type} where type can be one of 'disk', or 'cdrom', or empty (default to disk)
                 examples: {'url': 'nbd+unix:///test?socket=/tmp/ndb.socket'}, {'type': 'cdrom', 'url': '/somefile.iso'}
:return:
"""
args = {
'uuid': uuid,
'media': media,
}
self._man_disk_action_chk.check(args)
self._client.sync('kvm.detach_disk', args)
def add_nic(self, uuid, type, id=None, hwaddr=None):
"""
Add a nic to a machine
        :param uuid: uuid of the kvm domain (same as the one used in create)
        :param type: nic_type # default, bridge, vlan, or vxlan (note, vlan and vxlan only supported by ovs)
        :param id: id # depends on the type: bridge name (bridge type), zerotier network id (zerotier type), the vlan tag or the vxlan id
        :param hwaddr: the hardware address of the nic
:return:
"""
args = {
'uuid': uuid,
'type': type,
'id': id,
'hwaddr': hwaddr,
}
self._man_nic_action_chk.check(args)
return self._client.json('kvm.add_nic', args)
def remove_nic(self, uuid, type, id=None, hwaddr=None):
"""
Remove a nic from a machine
        :param uuid: uuid of the kvm domain (same as the one used in create)
        :param type: nic_type # default, bridge, vlan, or vxlan (note, vlan and vxlan only supported by ovs)
        :param id: id # depends on the type: bridge name (bridge type), zerotier network id (zerotier type), the vlan tag or the vxlan id
        :param hwaddr: the hardware address of the nic
:return:
"""
args = {
'uuid': uuid,
'type': type,
'id': id,
'hwaddr': hwaddr,
}
self._man_nic_action_chk.check(args)
return self._client.json('kvm.remove_nic', args)
def limit_disk_io(self, uuid, media, totalbytessecset=False, totalbytessec=0, readbytessecset=False, readbytessec=0, writebytessecset=False,
writebytessec=0, totaliopssecset=False, totaliopssec=0, readiopssecset=False, readiopssec=0, writeiopssecset=False, writeiopssec=0,
totalbytessecmaxset=False, totalbytessecmax=0, readbytessecmaxset=False, readbytessecmax=0, writebytessecmaxset=False, writebytessecmax=0,
totaliopssecmaxset=False, totaliopssecmax=0, readiopssecmaxset=False, readiopssecmax=0, writeiopssecmaxset=False, writeiopssecmax=0,
totalbytessecmaxlengthset=False, totalbytessecmaxlength=0, readbytessecmaxlengthset=False, readbytessecmaxlength=0,
writebytessecmaxlengthset=False, writebytessecmaxlength=0, totaliopssecmaxlengthset=False, totaliopssecmaxlength=0,
readiopssecmaxlengthset=False, readiopssecmaxlength=0, writeiopssecmaxlengthset=False, writeiopssecmaxlength=0, sizeiopssecset=False,
sizeiopssec=0, groupnameset=False, groupname=''):
"""
        Limit the disk IO of a machine's media
        :param uuid: uuid of the kvm domain (same as the one used in create)
:param media: the media to limit the diskio
:return:
"""
args = {
'uuid': uuid,
'media': media,
'totalbytessecset': totalbytessecset,
'totalbytessec': totalbytessec,
'readbytessecset': readbytessecset,
'readbytessec': readbytessec,
'writebytessecset': writebytessecset,
'writebytessec': writebytessec,
'totaliopssecset': totaliopssecset,
'totaliopssec': totaliopssec,
'readiopssecset': readiopssecset,
'readiopssec': readiopssec,
'writeiopssecset': writeiopssecset,
'writeiopssec': writeiopssec,
'totalbytessecmaxset': totalbytessecmaxset,
'totalbytessecmax': totalbytessecmax,
'readbytessecmaxset': readbytessecmaxset,
'readbytessecmax': readbytessecmax,
'writebytessecmaxset': writebytessecmaxset,
'writebytessecmax': writebytessecmax,
'totaliopssecmaxset': totaliopssecmaxset,
'totaliopssecmax': totaliopssecmax,
'readiopssecmaxset': readiopssecmaxset,
'readiopssecmax': readiopssecmax,
'writeiopssecmaxset': writeiopssecmaxset,
'writeiopssecmax': writeiopssecmax,
'totalbytessecmaxlengthset': totalbytessecmaxlengthset,
'totalbytessecmaxlength': totalbytessecmaxlength,
'readbytessecmaxlengthset': readbytessecmaxlengthset,
'readbytessecmaxlength': readbytessecmaxlength,
'writebytessecmaxlengthset': writebytessecmaxlengthset,
'writebytessecmaxlength': writebytessecmaxlength,
'totaliopssecmaxlengthset': totaliopssecmaxlengthset,
'totaliopssecmaxlength': totaliopssecmaxlength,
'readiopssecmaxlengthset': readiopssecmaxlengthset,
'readiopssecmaxlength': readiopssecmaxlength,
'writeiopssecmaxlengthset': writeiopssecmaxlengthset,
'writeiopssecmaxlength': writeiopssecmaxlength,
'sizeiopssecset': sizeiopssecset,
'sizeiopssec': sizeiopssec,
'groupnameset': groupnameset,
'groupname': groupname,
}
self._limit_disk_io_action_chk.check(args)
self._client.sync('kvm.limit_disk_io', args)
def migrate(self, uuid, desturi):
"""
Migrate a vm to another node
        :param uuid: uuid of the kvm domain (same as the one used in create)
:param desturi: the uri of the destination node
:return:
"""
args = {
'uuid': uuid,
'desturi': desturi,
}
self._migrate_action_chk.check(args)
self._client.sync('kvm.migrate', args)
def list(self):
"""
List configured domains
:return:
"""
return self._client.json('kvm.list', {})
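# Usage sketch (illustrative): booting a vm from an nbd-backed disk. The
# media url is an example, not a known-good endpoint.
#
#   result = cl.kvm.create(
#       'demo-vm',
#       media=[{'url': 'nbd+unix:///test?socket=/tmp/nbd.socket'}],
#       cpu=2, memory=1024,
#       nics=[{'type': 'default'}],
#       port={2222: 22},
#   )
#   vm_uuid = json.loads(result.data)   # create() syncs and returns a Return
#   print(cl.kvm.info(vm_uuid))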
class Logger:
_level_chk = typchk.Checker({
'level': typchk.Enum("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG"),
})
def __init__(self, client):
self._client = client
def set_level(self, level):
"""
Set the log level of the g8os
:param level: the level to be set can be one of ("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG")
"""
args = {
'level': level,
}
self._level_chk.check(args)
return self._client.json('logger.set_level', args)
def reopen(self):
"""
Reopen log file
"""
return self._client.json('logger.reopen', {})
class Nft:
_port_chk = typchk.Checker({
'port': int,
'interface': typchk.Or(str, typchk.Missing()),
'subnet': typchk.Or(str, typchk.Missing()),
})
def __init__(self, client):
self._client = client
def open_port(self, port, interface=None, subnet=None):
"""
open port
        :param port: the port number
:param interface: an optional interface to open the port for
:param subnet: an optional subnet to open the port for
"""
args = {
'port': port,
'interface': interface,
'subnet': subnet,
}
self._port_chk.check(args)
return self._client.json('nft.open_port', args)
def drop_port(self, port, interface=None, subnet=None):
"""
close an opened port (takes the same parameters passed in open)
        :param port: the port number
:param interface: an optional interface to close the port for
:param subnet: an optional subnet to close the port for
"""
args = {
'port': port,
'interface': interface,
'subnet': subnet,
}
self._port_chk.check(args)
return self._client.json('nft.drop_port', args)
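# Usage sketch (illustrative): opening and later closing a firewall port.
#
#   cl.nft.open_port(8080, interface='eth0')
#   cl.nft.drop_port(8080, interface='eth0')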
class Config:
def __init__(self, client):
self._client = client
def get(self):
"""
Get the config of g8os
"""
return self._client.json('config.get', {})
class Experimental:
def __init__(self, client):
pass
class Client(BaseClient):
def __init__(self, host, port=6379, password="", db=0, ssl=True, timeout=None, testConnectionAttempts=3):
super().__init__(timeout=timeout)
socket_timeout = (timeout + 5) if timeout else 15
self._redis = redis.Redis(host=host, port=port, password=password, db=db, ssl=ssl,
socket_timeout=socket_timeout,
socket_keepalive=True, socket_keepalive_options={
socket.TCP_KEEPINTVL: 1,
socket.TCP_KEEPCNT: 10
})
self._container_manager = ContainerManager(self)
self._bridge_manager = BridgeManager(self)
self._disk_manager = DiskManager(self)
self._btrfs_manager = BtrfsManager(self)
self._zerotier = ZerotierManager(self)
        self._experimental = Experimental(self)
self._kvm = KvmManager(self)
self._logger = Logger(self)
self._nft = Nft(self)
self._config = Config(self)
if testConnectionAttempts:
for _ in range(testConnectionAttempts):
try:
self.ping()
                except Exception:
pass
else:
return
raise RuntimeError("Could not connect to remote host %s" % host)
@property
def experimental(self):
        return self._experimental
@property
def container(self):
return self._container_manager
@property
def bridge(self):
return self._bridge_manager
@property
def disk(self):
return self._disk_manager
@property
def btrfs(self):
return self._btrfs_manager
@property
def zerotier(self):
return self._zerotier
@property
def kvm(self):
return self._kvm
@property
def logger(self):
return self._logger
@property
def nft(self):
return self._nft
@property
def config(self):
return self._config
def raw(self, command, arguments, queue=None, max_time=None):
"""
        Implements the low-level command call; this builds the command structure
        and pushes it onto the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:return: Response object
"""
id = str(uuid.uuid4())
payload = {
'id': id,
'command': command,
'arguments': arguments,
'queue': queue,
'max_time': max_time,
}
flag = 'result:{}:flag'.format(id)
self._redis.rpush('core:default', json.dumps(payload))
if self._redis.brpoplpush(flag, flag, DefaultTimeout) is None:
            raise Timeout('failed to queue job {}'.format(id))
logger.debug('%s >> g8core.%s(%s)', id, command, ', '.join(("%s=%s" % (k, v) for k, v in arguments.items())))
return Response(self, id)
def response_for(self, id):
        return Response(self, id)
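# End-to-end usage sketch (illustrative; the host and credentials are
# examples, adapt them to your deployment):
#
#   cl = Client('10.0.0.1', port=6379, password='secret', ssl=True)
#   cl.ping()                      # raises if the node is unreachable
#   print(cl.info.version())
#   print(cl.system('uptime').get().stdout)


# ---- zeroos/core0/client/typchk.py (package: 0-core-client) ----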
missing = object()
def primitive(typ):
return typ in [str, int, float, bool]
class CheckerException(BaseException):
pass
class Tracker(BaseException):
def __init__(self, base):
self._base = base
self._reason = None
self._branches = []
@property
def branches(self):
return self._branches
def copy(self):
l = self._base.copy()
t = Tracker(l)
t._reason = self._reason
return t
def push(self, s):
t = self.copy()
t._base.append(str(s))
return t
def pop(self):
t = self.copy()
t._base.pop()
return t
def reason(self, reason):
self._reason = reason
return self
def branch(self, tracker):
t = tracker.copy()
self._branches.append(t)
def __str__(self):
u = "/".join(self._base)
if self._reason is not None:
u = '[{}] at -> {}'.format(self._reason, u)
for branch in self.branches:
u += '\n -> {}'.format(branch)
return u
def __repr__(self):
return str(self)
class Option:
def __init__(self):
raise NotImplementedError()
def check(self, object, t):
raise NotImplementedError()
class Or(Option):
def __init__(self, *types):
self._checkers = []
for typ in types:
self._checkers.append(Checker(typ))
def check(self, object, t):
bt = t.copy()
for chk in self._checkers:
try:
chk.check(object, bt)
return
except Tracker as tx:
t.branch(tx)
raise t.reason('all branches failed')
class IsNone(Option):
def __init__(self):
pass
def check(self, object, t):
if object is not None:
raise t.reason('is not none')
class Missing(Option):
def __init__(self):
pass
def check(self, object, t):
        if object is not missing:
raise t.reason('is not missing')
class Any(Option):
def __init__(self):
pass
def check(self, object, t):
return
class Length(Option):
def __init__(self, typ, min=None, max=None):
self._checker = Checker(typ)
if min is None and max is None:
            raise ValueError("you have to pass either min or max to the length type checker")
self._min = min
self._max = max
def check(self, object, t):
self._checker.check(object, t)
if self._min is not None and len(object) < self._min:
raise t.reason('invalid length, expecting more than or equal {} got {}'.format(self._min, len(object)))
if self._max is not None and len(object) > self._max:
raise t.reason('invalid length, expecting less than or equal {} got {}'.format(self._max, len(object)))
class Map(Option):
def __init__(self, key_type, value_type):
self._key = Checker(key_type)
self._value = Checker(value_type)
def check(self, object, t):
if not isinstance(object, dict):
raise t.reason('expecting a dict, got {}'.format(type(object)))
for k, v in object.items():
tx = t.push(k)
self._key.check(k, tx)
tv = t.push('{}[value]'.format(k))
self._value.check(v, tv)
class Enum(Option):
def __init__(self, *valid):
self._valid = valid
def check(self, object, t):
if not isinstance(object, str):
raise t.reason('expecting string, got {}'.format(type(object)))
if object not in self._valid:
raise t.reason('value "{}" not in enum'.format(object))
class Checker:
"""
Build a type checker to check method inputs
A Checker takes a type definition as following
c = Checker(<type-def>)
then use c to check inputs as
valid = c.check(value)
type-def:
- primitive types (str, bool, int, float)
- composite types ([str], [int], etc...)
- dicts types ({'name': str, 'age': float, etc...})
    To build a more complex type-def you can use the available Options in the typchk module
- Or(type-def, type-def, ...)
- Missing() (Only make sense in dict types)
- IsNone() (accept None value)
Example of type definition
A dict object, with the following attributes
- `name` of type string
- optional `age` which can be int, or float
- A list of children each has
- string name
- float age
c = Checker({
'name': str,
'age': Or(int, float, Missing()),
'children': [{'name': str, 'age': float}]
})
    c.check({'name': 'azmy', 'age': 34, 'children': []})  # passes
    c.check({'name': 'azmy', 'children': []})  # passes
    c.check({'age': 34, 'children': []})  # does not pass
    c.check({'name': 'azmy', 'children': [{'name': 'yahia', 'age': 4.0}]})  # passes
    c.check({'name': 'azmy', 'children': [{'name': 'yahia', 'age': 4.0}, {'name': 'yassine'}]})  # does not pass
"""
    def __init__(self, typedef):
        self._typ = typedef
def check(self, object, tracker=None):
if tracker is None:
tracker = Tracker([]).push('/')
return self._check(self._typ, object, tracker)
def _check_list(self, typ, obj_list, t):
for i, elem in enumerate(obj_list):
tx = t.push('[{}]'.format(i))
self._check(typ, elem, tx)
def _check_dict(self, typ, obj_dict, t):
given = []
for name, value in obj_dict.items():
tx = t.push(name)
if name not in typ:
raise tx.reason('unknown key "{}"'.format(name))
given.append(name)
attr_type = typ[name]
self._check(attr_type, value, tx)
if len(given) == len(typ):
return
type_keys = list(typ.keys())
for key in given:
type_keys.remove(key)
for required in type_keys:
tx = t.push(required)
self._check(typ[required], missing, tx)
def _check(self, typ, object, t):
if isinstance(typ, Option):
return typ.check(object, t)
atyp = type(object)
if primitive(atyp) and atyp != typ:
raise t.reason('invalid type, expecting {}'.format(typ))
if isinstance(typ, list):
if atyp != list:
raise t.reason('expecting a list')
self._check_list(typ[0], object, t)
        elif isinstance(typ, tuple):
if atyp != tuple:
raise t.reason('expecting a tuple')
self._check_list(typ[0], object, t)
elif isinstance(typ, dict):
if atyp != dict:
raise t.reason('expecting a dict')
            self._check_dict(typ, object, t)
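# Usage sketch (illustrative): validating nested input with Checker. check()
# returns None on success and raises a Tracker describing the failure path.
#
#   chk = Checker({
#       'name': str,
#       'age': Or(int, float, Missing()),
#   })
#   chk.check({'name': 'demo', 'age': 7})   # passes
#   chk.check({'name': 42})                 # raises Tracker


# ---- zeroos/orchestrator/client/client_support.py (package: 0-orchestrator) ----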
import json
import collections
from datetime import datetime
from uuid import UUID
from enum import Enum
from dateutil import parser
# python2/3 compatible basestring, for use in to_dict
try:
basestring
except NameError:
basestring = str
def timestamp_from_datetime(datetime):
"""
Convert from datetime format to timestamp format
Input: Time in datetime format
Output: Time in timestamp format
"""
return datetime.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
def timestamp_to_datetime(timestamp):
"""
Convert from timestamp format to datetime format
Input: Time in timestamp format
Output: Time in datetime format
"""
return parser.parse(timestamp).replace(tzinfo=None)
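# Example round-trip (illustrative):
#
#   ts = timestamp_from_datetime(datetime(2017, 5, 1, 12, 30))
#   # -> '2017-05-01T12:30:00.000000Z'
#   timestamp_to_datetime(ts)
#   # -> datetime(2017, 5, 1, 12, 30)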
def has_properties(cls, property, child_properties):
for child_prop in child_properties:
if getattr(property, child_prop, None) is None:
return False
return True
def list_factory(val, member_type):
if not isinstance(val, list):
raise ValueError('list_factory: value must be a list')
return [val_factory(v, member_type) for v in val]
def dict_factory(val, objmap):
# objmap is a dict outlining the structure of this value
# its format is {'attrname': {'datatype': [type], 'required': bool}}
objdict = {}
for attrname, attrdict in objmap.items():
value = val.get(attrname)
if value is not None:
for dt in attrdict['datatype']:
try:
if isinstance(dt, dict):
objdict[attrname] = dict_factory(value, attrdict)
else:
objdict[attrname] = val_factory(value, [dt])
except Exception:
pass
if objdict.get(attrname) is None:
raise ValueError('dict_factory: {attr}: unable to instantiate with any supplied type'.format(attr=attrname))
elif attrdict.get('required'):
raise ValueError('dict_factory: {attr} is required'.format(attr=attrname))
return objdict
def val_factory(val, datatypes):
"""
return an instance of `val` that is of type `datatype`.
keep track of exceptions so we can produce meaningful error messages.
"""
exceptions = []
for dt in datatypes:
try:
if isinstance(val, dt):
return val
return type_handler_object(val, dt)
except Exception as e:
exceptions.append(str(e))
# if we get here, we never found a valid value. raise an error
raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'.
format(val=val, types=datatypes, excs=exceptions))
def to_json(cls, indent=0):
"""
serialize to JSON
:rtype: str
"""
# for consistency, use as_dict then go to json from there
return json.dumps(cls.as_dict(), indent=indent)
def to_dict(cls, convert_datetime=True):
"""
return a dict representation of the Event and its sub-objects
`convert_datetime` controls whether datetime objects are converted to strings or not
:rtype: dict
"""
def todict(obj):
"""
recurse the objects and represent as a dict
use the registered handlers if possible
"""
data = {}
if isinstance(obj, dict):
for (key, val) in obj.items():
data[key] = todict(val)
return data
if not convert_datetime and isinstance(obj, datetime):
return obj
elif type_handler_value(obj):
return type_handler_value(obj)
elif isinstance(obj, collections.Sequence) and not isinstance(obj, basestring):
return [todict(v) for v in obj]
elif hasattr(obj, "__dict__"):
for key, value in obj.__dict__.items():
if not callable(value) and not key.startswith('_'):
data[key] = todict(value)
return data
else:
return obj
return todict(cls)
class DatetimeHandler(object):
"""
output datetime objects as iso-8601 compliant strings
"""
@classmethod
def flatten(cls, obj):
"""flatten"""
return timestamp_from_datetime(obj)
@classmethod
def restore(cls, data):
"""restore"""
return timestamp_to_datetime(data)
class UUIDHandler(object):
"""
output UUID objects as a string
"""
@classmethod
def flatten(cls, obj):
"""flatten"""
return str(obj)
@classmethod
def restore(cls, data):
"""restore"""
return UUID(data)
class EnumHandler(object):
"""
output Enum objects as their value
"""
@classmethod
def flatten(cls, obj):
"""flatten"""
return obj.value
@classmethod
def restore(cls, data):
"""
cannot restore here because we don't know what type of enum it is
"""
raise NotImplementedError
handlers = {
datetime: DatetimeHandler,
Enum: EnumHandler,
UUID: UUIDHandler,
}
def handler_for(obj):
"""return the handler for the object type"""
for handler_type in handlers:
if isinstance(obj, handler_type):
return handlers[handler_type]
try:
for handler_type in handlers:
if issubclass(obj, handler_type):
return handlers[handler_type]
except TypeError:
# if obj isn't a class, issubclass will raise a TypeError
pass
def type_handler_value(obj):
"""
return the serialized (flattened) value from the registered handler for the type
"""
handler = handler_for(obj)
if handler:
return handler().flatten(obj)
def type_handler_object(val, objtype):
"""
return the deserialized (restored) value from the registered handler for the type
"""
handler = handlers.get(objtype)
if handler:
return handler().restore(val)
else:
        return objtype(val)
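# Example (illustrative): round-tripping a datetime through the handler
# registry used by to_dict and val_factory.
#
#   type_handler_value(datetime(2017, 5, 1))
#   # -> '2017-05-01T00:00:00.000000Z'
#   type_handler_object('2017-05-01T00:00:00Z', datetime)
#   # -> datetime(2017, 5, 1, 0, 0)


# ---- zeroos/orchestrator/client/client.py (package: 0-orchestrator) ----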
import requests
from .graphs_service import GraphsService
from .health_service import HealthService
from .nodes_service import NodesService
from .storageclusters_service import StorageclustersService
from .vdisks_service import VdisksService
class Client:
def __init__(self, base_uri=""):
self.base_url = base_uri
self.session = requests.Session()
self.graphs = GraphsService(self)
self.health = HealthService(self)
self.nodes = NodesService(self)
self.storageclusters = StorageclustersService(self)
self.vdisks = VdisksService(self)
def is_goraml_class(self, data):
        # check if data is a go-raml generated class;
        # we currently only check for the existence
        # of an as_json method
op = getattr(data, "as_json", None)
if callable(op):
return True
return False
def set_auth_header(self, val):
''' set authorization header value'''
self.session.headers.update({"Authorization": val})
def _get_headers(self, headers, content_type):
if content_type:
contentheader = {"Content-Type": content_type}
if headers is None:
headers = contentheader
else:
headers.update(contentheader)
return headers
def _handle_data(self, uri, data, headers, params, content_type, method):
headers = self._get_headers(headers, content_type)
if self.is_goraml_class(data):
data = data.as_json()
if content_type == "multipart/form-data":
            # when content type is multipart/form-data, remove the Content-Type header
            # as requests will set it itself with the correct boundary
headers.pop('Content-Type')
res = method(uri, files=data, headers=headers, params=params)
elif data is None:
res = method(uri, headers=headers, params=params)
elif type(data) is str:
res = method(uri, data=data, headers=headers, params=params)
else:
res = method(uri, json=data, headers=headers, params=params)
res.raise_for_status()
return res
def post(self, uri, data, headers, params, content_type):
return self._handle_data(uri, data, headers, params, content_type, self.session.post)
def put(self, uri, data, headers, params, content_type):
return self._handle_data(uri, data, headers, params, content_type, self.session.put)
def patch(self, uri, data, headers, params, content_type):
return self._handle_data(uri, data, headers, params, content_type, self.session.patch)
def get(self, uri, data, headers, params, content_type):
return self._handle_data(uri, data, headers, params, content_type, self.session.get)
def delete(self, uri, data, headers, params, content_type):
return self._handle_data(uri, data, headers, params, content_type, self.session.delete) | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/client.py | client.py |
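# Minimal usage sketch (illustrative; the URL and token are placeholders,
# not defaults shipped with the orchestrator):
#
#   api = Client(base_uri="http://orchestrator:8080/api")
#   api.set_auth_header("Bearer <jwt>")
#   resp = api.nodes.ListNodes()   # raw requests.Response; raises on >= 400
#   nodes = resp.json()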
class NodesService:
def __init__(self, client):
self.client = client
def DeleteBridge(self, bridgeid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Remove bridge
It is method for DELETE /nodes/{nodeid}/bridges/{bridgeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges/"+bridgeid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetBridge(self, bridgeid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get bridge details
It is method for GET /nodes/{nodeid}/bridges/{bridgeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges/"+bridgeid
return self.client.get(uri, None, headers, query_params, content_type)
def ListBridges(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List bridges
It is method for GET /nodes/{nodeid}/bridges
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateBridge(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Creates a new bridge
It is method for POST /nodes/{nodeid}/bridges
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/bridges"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerCPUInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all CPUs in the container
It is method for GET /nodes/{nodeid}/containers/{containername}/cpus
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/cpus"
return self.client.get(uri, None, headers, query_params, content_type)
def GetContainerDiskInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all the disks in the container
It is method for GET /nodes/{nodeid}/containers/{containername}/disks
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/disks"
return self.client.get(uri, None, headers, query_params, content_type)
def FileDelete(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete file from container
It is method for DELETE /nodes/{nodeid}/containers/{containername}/filesystem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/filesystem"
return self.client.delete(uri, data, headers, query_params, content_type)
def FileDownload(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Download file from container
It is method for GET /nodes/{nodeid}/containers/{containername}/filesystem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/filesystem"
return self.client.get(uri, None, headers, query_params, content_type)
def FileUpload(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Upload file to container
It is method for POST /nodes/{nodeid}/containers/{containername}/filesystem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/filesystem"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerOSInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of the container OS
It is method for GET /nodes/{nodeid}/containers/{containername}/info
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/info"
return self.client.get(uri, None, headers, query_params, content_type)
def KillContainerJob(self, jobid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the job
It is method for DELETE /nodes/{nodeid}/containers/{containername}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs/"+jobid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetContainerJob(self, jobid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get details of a submitted job on the container
It is method for GET /nodes/{nodeid}/containers/{containername}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs/"+jobid
return self.client.get(uri, None, headers, query_params, content_type)
def SendSignalToJob(self, data, jobid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Send signal to the job
It is method for POST /nodes/{nodeid}/containers/{containername}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs/"+jobid
return self.client.post(uri, data, headers, query_params, content_type)
def KillAllContainerJobs(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kill all running jobs on the container
It is method for DELETE /nodes/{nodeid}/containers/{containername}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs"
return self.client.delete(uri, None, headers, query_params, content_type)
def ListContainerJobs(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running jobs on the container
It is method for GET /nodes/{nodeid}/containers/{containername}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs"
return self.client.get(uri, None, headers, query_params, content_type)
def StartContainerJob(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start a new job in this container
It is method for POST /nodes/{nodeid}/containers/{containername}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/jobs"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerMemInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the memory in the container
It is method for GET /nodes/{nodeid}/containers/{containername}/mem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/mem"
return self.client.get(uri, None, headers, query_params, content_type)
def GetContainerNicInfo(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the network interfaces in the container
It is method for GET /nodes/{nodeid}/containers/{containername}/nics
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/nics"
return self.client.get(uri, None, headers, query_params, content_type)
def PingContainer(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Ping this container
It is method for POST /nodes/{nodeid}/containers/{containername}/ping
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/ping"
return self.client.post(uri, data, headers, query_params, content_type)
def KillContainerProcess(self, processid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the process by sending sigterm signal to the process. If it is still running, a sigkill signal will be sent to the process
It is method for DELETE /nodes/{nodeid}/containers/{containername}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes/"+processid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetContainerProcess(self, processid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get process details
It is method for GET /nodes/{nodeid}/containers/{containername}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes/"+processid
return self.client.get(uri, None, headers, query_params, content_type)
def SendSignalToProcess(self, data, processid, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Send signal to the process
It is method for POST /nodes/{nodeid}/containers/{containername}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes/"+processid
return self.client.post(uri, data, headers, query_params, content_type)
def ListContainerProcesses(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get running processes in this container
It is method for GET /nodes/{nodeid}/containers/{containername}/processes
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/processes"
return self.client.get(uri, None, headers, query_params, content_type)
def StartContainer(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start container instance
It is method for POST /nodes/{nodeid}/containers/{containername}/start
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/start"
return self.client.post(uri, data, headers, query_params, content_type)
def GetContainerState(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get aggregated consumption of container + all processes (CPU, memory, etc.)
It is method for GET /nodes/{nodeid}/containers/{containername}/state
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/state"
return self.client.get(uri, None, headers, query_params, content_type)
def StopContainer(self, data, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Stop container instance
It is method for POST /nodes/{nodeid}/containers/{containername}/stop
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername+"/stop"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteContainer(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete container instance
It is method for DELETE /nodes/{nodeid}/containers/{containername}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername
return self.client.delete(uri, None, headers, query_params, content_type)
def GetContainer(self, containername, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get container
It is method for GET /nodes/{nodeid}/containers/{containername}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers/"+containername
return self.client.get(uri, None, headers, query_params, content_type)
def ListContainers(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running containers
It is method for GET /nodes/{nodeid}/containers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateContainer(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new container
It is method for POST /nodes/{nodeid}/containers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/containers"
return self.client.post(uri, data, headers, query_params, content_type)
def GetCPUInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all CPUs in the node
It is method for GET /nodes/{nodeid}/cpus
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/cpus"
return self.client.get(uri, None, headers, query_params, content_type)
def GetDiskInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all the disks in the node
It is method for GET /nodes/{nodeid}/disks
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/disks"
return self.client.get(uri, None, headers, query_params, content_type)
def GetGWFWConfig(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get current FW config
It is method for GET /nodes/{nodeid}/gws/{gwname}/advanced/firewall
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/firewall"
return self.client.get(uri, None, headers, query_params, content_type)
def SetGWFWConfig(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Set FW config
        Once used, gw.portforwards can no longer be used
It is method for POST /nodes/{nodeid}/gws/{gwname}/advanced/firewall
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/firewall"
return self.client.post(uri, data, headers, query_params, content_type)
def GetGWHTTPConfig(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get current HTTP config
It is method for GET /nodes/{nodeid}/gws/{gwname}/advanced/http
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/http"
return self.client.get(uri, None, headers, query_params, content_type)
def SetGWHTTPConfig(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Set HTTP config
        Once used, gw.httpproxies can no longer be used
It is method for POST /nodes/{nodeid}/gws/{gwname}/advanced/http
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/advanced/http"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteDHCPHost(self, macaddress, interface, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete dhcp host
It is method for DELETE /nodes/{nodeid}/gws/{gwname}/dhcp/{interface}/hosts/{macaddress}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/dhcp/"+interface+"/hosts/"+macaddress
return self.client.delete(uri, None, headers, query_params, content_type)
def ListGWDHCPHosts(self, interface, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List DHCPHosts for specified interface
It is method for GET /nodes/{nodeid}/gws/{gwname}/dhcp/{interface}/hosts
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/dhcp/"+interface+"/hosts"
return self.client.get(uri, None, headers, query_params, content_type)
def AddGWDHCPHost(self, data, interface, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Add a dhcp host to a specified interface
It is method for POST /nodes/{nodeid}/gws/{gwname}/dhcp/{interface}/hosts
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/dhcp/"+interface+"/hosts"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteGWForward(self, forwardid, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete portforward, forwardid = srcip:srcport
It is method for DELETE /nodes/{nodeid}/gws/{gwname}/firewall/forwards/{forwardid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/firewall/forwards/"+forwardid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetGWForwards(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
        Get the list of IPv4 forwards
It is method for GET /nodes/{nodeid}/gws/{gwname}/firewall/forwards
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/firewall/forwards"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateGWForwards(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
        Create a new port forwarding
It is method for POST /nodes/{nodeid}/gws/{gwname}/firewall/forwards
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/firewall/forwards"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteHTTPProxies(self, proxyid, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete HTTP proxy
It is method for DELETE /nodes/{nodeid}/gws/{gwname}/httpproxies/{proxyid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies/"+proxyid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetHTTPProxy(self, proxyid, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get info of HTTP proxy
It is method for GET /nodes/{nodeid}/gws/{gwname}/httpproxies/{proxyid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies/"+proxyid
return self.client.get(uri, None, headers, query_params, content_type)
def ListHTTPProxies(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
        List HTTP proxies
It is method for GET /nodes/{nodeid}/gws/{gwname}/httpproxies
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateHTTPProxies(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create new HTTP proxies
It is method for POST /nodes/{nodeid}/gws/{gwname}/httpproxies
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/httpproxies"
return self.client.post(uri, data, headers, query_params, content_type)
def StartGateway(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start Gateway instance
It is method for POST /nodes/{nodeid}/gws/{gwname}/start
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/start"
return self.client.post(uri, data, headers, query_params, content_type)
def StopGateway(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Stop gateway instance
It is method for POST /nodes/{nodeid}/gws/{gwname}/stop
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname+"/stop"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteGateway(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete gateway instance
It is method for DELETE /nodes/{nodeid}/gws/{gwname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetGateway(self, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get gateway
It is method for GET /nodes/{nodeid}/gws/{gwname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname
return self.client.get(uri, None, headers, query_params, content_type)
def UpdateGateway(self, data, gwname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Update Gateway
It is method for PUT /nodes/{nodeid}/gws/{gwname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws/"+gwname
return self.client.put(uri, data, headers, query_params, content_type)
def ListGateways(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running gateways
It is method for GET /nodes/{nodeid}/gws
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateGW(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new gateway
It is method for POST /nodes/{nodeid}/gws
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/gws"
return self.client.post(uri, data, headers, query_params, content_type)
def GetNodeOSInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of the OS of the node
It is method for GET /nodes/{nodeid}/info
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/info"
return self.client.get(uri, None, headers, query_params, content_type)
def KillNodeJob(self, jobid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the job
It is method for DELETE /nodes/{nodeid}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs/"+jobid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetNodeJob(self, jobid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get the details of a submitted job
It is method for GET /nodes/{nodeid}/jobs/{jobid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs/"+jobid
return self.client.get(uri, None, headers, query_params, content_type)
def KillAllNodeJobs(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kill all running jobs
It is method for DELETE /nodes/{nodeid}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs"
return self.client.delete(uri, None, headers, query_params, content_type)
def ListNodeJobs(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running jobs
It is method for GET /nodes/{nodeid}/jobs
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/jobs"
return self.client.get(uri, None, headers, query_params, content_type)
def GetMemInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the memory in the node
It is method for GET /nodes/{nodeid}/mem
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/mem"
return self.client.get(uri, None, headers, query_params, content_type)
def GetNodeMounts(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of all the mountpoints on the node
It is method for GET /nodes/{nodeid}/mounts
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/mounts"
return self.client.get(uri, None, headers, query_params, content_type)
def GetNicInfo(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information about the network interfaces in the node
It is method for GET /nodes/{nodeid}/nics
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/nics"
return self.client.get(uri, None, headers, query_params, content_type)
def PingNode(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Ping this node
It is method for POST /nodes/{nodeid}/ping
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/ping"
return self.client.post(uri, data, headers, query_params, content_type)
def KillNodeProcess(self, processid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Kills the process by sending sigterm signal to the process. If it is still running, a sigkill signal will be sent to the process
It is method for DELETE /nodes/{nodeid}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/processes/"+processid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetNodeProcess(self, processid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get process details
It is method for GET /nodes/{nodeid}/processes/{processid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/processes/"+processid
return self.client.get(uri, None, headers, query_params, content_type)
def ListNodeProcesses(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get processes
It is method for GET /nodes/{nodeid}/processes
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/processes"
return self.client.get(uri, None, headers, query_params, content_type)
def RebootNode(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Immediately reboot the machine
It is method for POST /nodes/{nodeid}/reboot
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/reboot"
return self.client.post(uri, data, headers, query_params, content_type)
def GetNodeState(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
The aggregated consumption of node + all processes (cpu, memory, etc...)
It is method for GET /nodes/{nodeid}/state
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/state"
return self.client.get(uri, None, headers, query_params, content_type)
def GetStats(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get all statskeys of the node
It is method for GET /nodes/{nodeid}/stats
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/stats"
return self.client.get(uri, None, headers, query_params, content_type)
def DeleteStoragePoolDevice(self, deviceuuid, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Removes the device from the storage pool
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}/devices/{deviceuuid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices/"+deviceuuid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetStoragePoolDeviceInfo(self, deviceuuid, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get information of the device
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/devices/{deviceuuid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices/"+deviceuuid
return self.client.get(uri, None, headers, query_params, content_type)
def ListStoragePoolDevices(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List the devices in the storage pool
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/devices
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateStoragePoolDevices(self, data, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Add extra devices to this storage pool
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/devices
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/devices"
return self.client.post(uri, data, headers, query_params, content_type)
def RollbackFilesystemSnapshot(self, data, snapshotname, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Rollback the file system to the state at the moment the snapshot was taken
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots/{snapshotname}/rollback
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots/"+snapshotname+"/rollback"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteFilesystemSnapshot(self, snapshotname, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete snapshot
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots/{snapshotname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots/"+snapshotname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetFilesystemSnapshotInfo(self, snapshotname, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information on the snapshot
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots/{snapshotname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots/"+snapshotname
return self.client.get(uri, None, headers, query_params, content_type)
def ListFilesystemSnapshots(self, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List snapshots of this file system
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateSnapshot(self, data, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
        Create a new read-only snapshot of the current state of the file system
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}/snapshots
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname+"/snapshots"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteFilesystem(self, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete file system
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetFilesystemInfo(self, filesystemname, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed file system information
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems/{filesystemname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems/"+filesystemname
return self.client.get(uri, None, headers, query_params, content_type)
def ListFilesystems(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List all file systems
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateFilesystem(self, data, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new file system
It is method for POST /nodes/{nodeid}/storagepools/{storagepoolname}/filesystems
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname+"/filesystems"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteStoragePool(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete the storage pool
It is method for DELETE /nodes/{nodeid}/storagepools/{storagepoolname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetStoragePoolInfo(self, storagepoolname, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of this storage pool
It is method for GET /nodes/{nodeid}/storagepools/{storagepoolname}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools/"+storagepoolname
return self.client.get(uri, None, headers, query_params, content_type)
def ListStoragePools(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List storage pools present in the node
It is method for GET /nodes/{nodeid}/storagepools
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateStoragePool(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Create a new storage pool in the node
It is method for POST /nodes/{nodeid}/storagepools
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/storagepools"
return self.client.post(uri, data, headers, query_params, content_type)
def GetVMInfo(self, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get statistical information about the virtual machine.
It is method for GET /nodes/{nodeid}/vms/{vmid}/info
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/info"
return self.client.get(uri, None, headers, query_params, content_type)
def MigrateVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Migrate the virtual machine to another host
It is method for POST /nodes/{nodeid}/vms/{vmid}/migrate
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/migrate"
return self.client.post(uri, data, headers, query_params, content_type)
def PauseVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Pauses the VM
It is method for POST /nodes/{nodeid}/vms/{vmid}/pause
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/pause"
return self.client.post(uri, data, headers, query_params, content_type)
def ResumeVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Resumes the virtual machine
It is method for POST /nodes/{nodeid}/vms/{vmid}/resume
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/resume"
return self.client.post(uri, data, headers, query_params, content_type)
def ShutdownVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Gracefully shutdown the virtual machine
It is method for POST /nodes/{nodeid}/vms/{vmid}/shutdown
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/shutdown"
return self.client.post(uri, data, headers, query_params, content_type)
def StartVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Start the virtual machine
It is method for POST /nodes/{nodeid}/vms/{vmid}/start
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/start"
return self.client.post(uri, data, headers, query_params, content_type)
def StopVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Stops the VM
It is method for POST /nodes/{nodeid}/vms/{vmid}/stop
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid+"/stop"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteVM(self, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Deletes the virtual machine
It is method for DELETE /nodes/{nodeid}/vms/{vmid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetVM(self, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get the virtual machine object
It is method for GET /nodes/{nodeid}/vms/{vmid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid
return self.client.get(uri, None, headers, query_params, content_type)
def UpdateVM(self, data, vmid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Updates the virtual machine
It is method for PUT /nodes/{nodeid}/vms/{vmid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms/"+vmid
return self.client.put(uri, data, headers, query_params, content_type)
def ListVMs(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List all virtual machines
It is method for GET /nodes/{nodeid}/vms
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateVM(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Creates a new virtual machine
It is method for POST /nodes/{nodeid}/vms
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/vms"
return self.client.post(uri, data, headers, query_params, content_type)
def ExitZerotier(self, zerotierid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Exit the ZeroTier network
It is method for DELETE /nodes/{nodeid}/zerotiers/{zerotierid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers/"+zerotierid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetZerotier(self, zerotierid, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get ZeroTier network details
It is method for GET /nodes/{nodeid}/zerotiers/{zerotierid}
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers/"+zerotierid
return self.client.get(uri, None, headers, query_params, content_type)
def ListZerotier(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
List running ZeroTier networks
It is method for GET /nodes/{nodeid}/zerotiers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers"
return self.client.get(uri, None, headers, query_params, content_type)
def JoinZerotier(self, data, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Join ZeroTier network
It is method for POST /nodes/{nodeid}/zerotiers
"""
uri = self.client.base_url + "/nodes/"+nodeid+"/zerotiers"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteNode(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Delete a node
It is method for DELETE /nodes/{nodeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetNode(self, nodeid, headers=None, query_params=None, content_type="application/json"):
"""
Get detailed information of a node
It is method for GET /nodes/{nodeid}
"""
uri = self.client.base_url + "/nodes/"+nodeid
return self.client.get(uri, None, headers, query_params, content_type)
def ListNodes(self, headers=None, query_params=None, content_type="application/json"):
"""
List all nodes
It is method for GET /nodes
"""
uri = self.client.base_url + "/nodes"
return self.client.get(uri, None, headers, query_params, content_type) | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/nodes_service.py | nodes_service.py |
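# Usage sketch (illustrative; node ids and payload shapes are placeholders):
# every method above follows the same generated pattern -- build the REST
# URI from its arguments, then delegate to the shared HTTP client, which
# returns the raw requests.Response.
#
#   svc = api.nodes                               # constructed by Client
#   containers = svc.ListContainers("node-1").json()
#   svc.RebootNode(data=None, nodeid="node-1")    # POST without a body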
class GraphsService:
def __init__(self, client):
self.client = client
def DeleteDashboard(self, dashboardname, graphid, headers=None, query_params=None, content_type="application/json"):
"""
Delete a dashboard
It is method for DELETE /graphs/{graphid}/dashboards/{dashboardname}
"""
uri = self.client.base_url + "/graphs/"+graphid+"/dashboards/"+dashboardname
return self.client.delete(uri, None, headers, query_params, content_type)
def GetDashboard(self, dashboardname, graphid, headers=None, query_params=None, content_type="application/json"):
"""
Get dashboard
It is method for GET /graphs/{graphid}/dashboards/{dashboardname}
"""
uri = self.client.base_url + "/graphs/"+graphid+"/dashboards/"+dashboardname
return self.client.get(uri, None, headers, query_params, content_type)
def ListDashboards(self, graphid, headers=None, query_params=None, content_type="application/json"):
"""
List dashboards
It is method for GET /graphs/{graphid}/dashboards
"""
uri = self.client.base_url + "/graphs/"+graphid+"/dashboards"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateDashboard(self, data, graphid, headers=None, query_params=None, content_type="application/json"):
"""
Create Dashboard
It is method for POST /graphs/{graphid}/dashboards
"""
uri = self.client.base_url + "/graphs/"+graphid+"/dashboards"
return self.client.post(uri, data, headers, query_params, content_type)
def GetGraph(self, graphid, headers=None, query_params=None, content_type="application/json"):
"""
Get a graph
It is method for GET /graphs/{graphid}
"""
uri = self.client.base_url + "/graphs/"+graphid
return self.client.get(uri, None, headers, query_params, content_type)
def UpdateGraph(self, data, graphid, headers=None, query_params=None, content_type="application/json"):
"""
Update Graph
It is method for PUT /graphs/{graphid}
"""
uri = self.client.base_url + "/graphs/"+graphid
return self.client.put(uri, data, headers, query_params, content_type)
def ListGraphs(self, headers=None, query_params=None, content_type="application/json"):
"""
List all graphs
It is method for GET /graphs
"""
uri = self.client.base_url + "/graphs"
return self.client.get(uri, None, headers, query_params, content_type) | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/graphs_service.py | graphs_service.py |
class VdisksService:
def __init__(self, client):
self.client = client
def ResizeVdisk(self, data, vdiskid, headers=None, query_params=None, content_type="application/json"):
"""
Resize vdisk
It is method for POST /vdisks/{vdiskid}/resize
"""
uri = self.client.base_url + "/vdisks/"+vdiskid+"/resize"
return self.client.post(uri, data, headers, query_params, content_type)
def RollbackVdisk(self, data, vdiskid, headers=None, query_params=None, content_type="application/json"):
"""
Rollback a vdisk to a previous state
It is method for POST /vdisks/{vdiskid}/rollback
"""
uri = self.client.base_url + "/vdisks/"+vdiskid+"/rollback"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteVdisk(self, vdiskid, headers=None, query_params=None, content_type="application/json"):
"""
Delete Vdisk
It is method for DELETE /vdisks/{vdiskid}
"""
uri = self.client.base_url + "/vdisks/"+vdiskid
return self.client.delete(uri, None, headers, query_params, content_type)
def GetVdiskInfo(self, vdiskid, headers=None, query_params=None, content_type="application/json"):
"""
Get vdisk information
It is method for GET /vdisks/{vdiskid}
"""
uri = self.client.base_url + "/vdisks/"+vdiskid
return self.client.get(uri, None, headers, query_params, content_type)
def ListVdisks(self, headers=None, query_params=None, content_type="application/json"):
"""
List vdisks
It is method for GET /vdisks
"""
uri = self.client.base_url + "/vdisks"
return self.client.get(uri, None, headers, query_params, content_type)
def CreateNewVdisk(self, data, headers=None, query_params=None, content_type="application/json"):
"""
        Create a new vdisk; it can be a copy of an existing vdisk
It is method for POST /vdisks
"""
uri = self.client.base_url + "/vdisks"
return self.client.post(uri, data, headers, query_params, content_type) | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/vdisks_service.py | vdisks_service.py |
import datetime
import time
def generate_rfc3339(d, local_tz=True):
"""
generate rfc3339 time format
input :
d = date type
local_tz = use local time zone if true,
otherwise mark as utc
output :
rfc3339 string date format. ex : `2008-04-02T20:00:00+07:00`
"""
try:
if local_tz:
d = datetime.datetime.fromtimestamp(d)
else:
d = datetime.datetime.utcfromtimestamp(d)
except TypeError:
pass
if not isinstance(d, datetime.date):
raise TypeError('Not timestamp or date object. Got %r.' % type(d))
if not isinstance(d, datetime.datetime):
d = datetime.datetime(*d.timetuple()[:3])
return ('%04d-%02d-%02dT%02d:%02d:%02d%s' %
(d.year, d.month, d.day, d.hour, d.minute, d.second,
_generate_timezone(d, local_tz)))
def _calculate_offset(date, local_tz):
"""
input :
date : date type
local_tz : if true, use system timezone, otherwise return 0
    return the UTC offset in seconds.
    If local_tz is true, the system timezone is used,
    otherwise the offset is 0
"""
if local_tz:
        # handle years before 1970; most systems have no timezone information before 1970
        if date.year < 1970:
            # Use 1972 because 1970 doesn't have a leap day
            t = time.mktime(date.replace(year=1972).timetuple())
else:
t = time.mktime(date.timetuple())
# handle daylightsaving, if daylightsaving use altzone, otherwise use timezone
if time.localtime(t).tm_isdst:
return -time.altzone
else:
return -time.timezone
else:
return 0
def _generate_timezone(date, local_tz):
"""
input :
date : date type
local_tz : bool
    output :
        timezone string built from the offset (in seconds)
        returned by _calculate_offset :
        offset = 0 -> +00:00
        offset = 1800 -> +00:30
        offset = -3600 -> -01:00
"""
offset = _calculate_offset(date, local_tz)
hour = abs(offset) // 3600
minute = abs(offset) % 3600 // 60
if offset < 0:
return '%c%02d:%02d' % ("-", hour, minute)
else:
return '%c%02d:%02d' % ("+", hour, minute) | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/client_utils.py | client_utils.py |
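# Example (illustrative): with local_tz=False the output is deterministic;
# with the default local_tz=True the offset follows the system timezone.
#
#   generate_rfc3339(1207137600, local_tz=False)
#   # -> '2008-04-02T12:00:00+00:00'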
import requests
from .Bridge import Bridge
from .BridgeCreate import BridgeCreate
from .BridgeCreateSetting import BridgeCreateSetting
from .CPUInfo import CPUInfo
from .CPUStats import CPUStats
from .CloudInit import CloudInit
from .Cluster import Cluster
from .ClusterCreate import ClusterCreate
from .Container import Container
from .ContainerListItem import ContainerListItem
from .ContainerNIC import ContainerNIC
from .ContainerNICconfig import ContainerNICconfig
from .CoreStateResult import CoreStateResult
from .CoreSystem import CoreSystem
from .CreateContainer import CreateContainer
from .CreateSnapshotReqBody import CreateSnapshotReqBody
from .DHCP import DHCP
from .Dashboard import Dashboard
from .DashboardListItem import DashboardListItem
from .DeleteFile import DeleteFile
from .DiskInfo import DiskInfo
from .DiskPartition import DiskPartition
from .EnumBridgeCreateNetworkMode import EnumBridgeCreateNetworkMode
from .EnumBridgeStatus import EnumBridgeStatus
from .EnumClusterCreateClusterType import EnumClusterCreateClusterType
from .EnumClusterCreateDriveType import EnumClusterCreateDriveType
from .EnumClusterDriveType import EnumClusterDriveType
from .EnumClusterStatus import EnumClusterStatus
from .EnumContainerListItemStatus import EnumContainerListItemStatus
from .EnumContainerNICStatus import EnumContainerNICStatus
from .EnumContainerNICType import EnumContainerNICType
from .EnumContainerStatus import EnumContainerStatus
from .EnumDiskInfoType import EnumDiskInfoType
from .EnumGWNICType import EnumGWNICType
from .EnumGetGWStatus import EnumGetGWStatus
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from .EnumNicLinkType import EnumNicLinkType
from .EnumNodeStatus import EnumNodeStatus
from .EnumStoragePoolCreateDataProfile import EnumStoragePoolCreateDataProfile
from .EnumStoragePoolCreateMetadataProfile import EnumStoragePoolCreateMetadataProfile
from .EnumStoragePoolDataProfile import EnumStoragePoolDataProfile
from .EnumStoragePoolDeviceStatus import EnumStoragePoolDeviceStatus
from .EnumStoragePoolListItemStatus import EnumStoragePoolListItemStatus
from .EnumStoragePoolMetadataProfile import EnumStoragePoolMetadataProfile
from .EnumStoragePoolStatus import EnumStoragePoolStatus
from .EnumStorageServerStatus import EnumStorageServerStatus
from .EnumVMListItemStatus import EnumVMListItemStatus
from .EnumVMStatus import EnumVMStatus
from .EnumVdiskCreateType import EnumVdiskCreateType
from .EnumVdiskListItemStatus import EnumVdiskListItemStatus
from .EnumVdiskListItemType import EnumVdiskListItemType
from .EnumVdiskStatus import EnumVdiskStatus
from .EnumVdiskType import EnumVdiskType
from .EnumZerotierListItemType import EnumZerotierListItemType
from .EnumZerotierType import EnumZerotierType
from .Filesystem import Filesystem
from .FilesystemCreate import FilesystemCreate
from .GW import GW
from .GWCreate import GWCreate
from .GWHost import GWHost
from .GWNIC import GWNIC
from .GWNICconfig import GWNICconfig
from .GetGW import GetGW
from .Graph import Graph
from .HTTPProxy import HTTPProxy
from .HTTPType import HTTPType
from .HealthCheck import HealthCheck
from .IPProtocol import IPProtocol
from .Job import Job
from .JobListItem import JobListItem
from .JobResult import JobResult
from .ListGW import ListGW
from .MemInfo import MemInfo
from .NicInfo import NicInfo
from .NicLink import NicLink
from .Node import Node
from .NodeHealthCheck import NodeHealthCheck
from .NodeMount import NodeMount
from .OSInfo import OSInfo
from .PortForward import PortForward
from .Process import Process
from .ProcessSignal import ProcessSignal
from .Snapshot import Snapshot
from .StoragePool import StoragePool
from .StoragePoolCreate import StoragePoolCreate
from .StoragePoolDevice import StoragePoolDevice
from .StoragePoolListItem import StoragePoolListItem
from .StorageServer import StorageServer
from .VDiskLink import VDiskLink
from .VM import VM
from .VMCreate import VMCreate
from .VMDiskInfo import VMDiskInfo
from .VMInfo import VMInfo
from .VMListItem import VMListItem
from .VMMigrate import VMMigrate
from .VMNicInfo import VMNicInfo
from .VMUpdate import VMUpdate
from .Vdisk import Vdisk
from .VdiskCreate import VdiskCreate
from .VdiskListItem import VdiskListItem
from .VdiskResize import VdiskResize
from .VdiskRollback import VdiskRollback
from .WriteFile import WriteFile
from .Zerotier import Zerotier
from .ZerotierBridge import ZerotierBridge
from .ZerotierJoin import ZerotierJoin
from .ZerotierListItem import ZerotierListItem
from .ZerotierRoute import ZerotierRoute
from .client import Client as APIClient
from .oauth2_client_itsyouonline import Oauth2ClientItsyouonline
class Client:
def __init__(self, base_uri=""):
self.api = APIClient(base_uri)
self.oauth2_client_itsyouonline = Oauth2ClientItsyouonline() | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/__init__.py | __init__.py |
from enum import Enum
from .Partition import Partition
from .abstracts import Mountable
class DiskType(Enum):
ssd = "ssd"
hdd = "hdd"
nvme = "nvme"
archive = "archive"
cdrom = 'cdrom'
class Disks:
"""Subobject to list disks"""
def __init__(self, node):
self.node = node
self._client = node.client
def list(self):
"""
List of disks on the node
"""
disks = []
disk_list = self._client.disk.list()
if 'blockdevices' in disk_list:
            for disk_info in disk_list['blockdevices']:
disks.append(Disk(
node=self.node,
disk_info=disk_info
))
return disks
def get(self, name):
"""
return the disk called `name`
@param name: name of the disk
"""
for disk in self.list():
if disk.name == name:
return disk
return None
class Disk(Mountable):
"""Disk in a G8OS"""
def __init__(self, node, disk_info):
"""
disk_info: dict returned by client.disk.list()
"""
# g8os client to talk to the node
self.node = node
self._client = node.client
self.name = None
self.size = None
self.blocksize = None
self.partition_table = None
self.mountpoint = None
self.model = None
self._filesystems = []
self.type = None
self.partitions = []
self._load(disk_info)
@property
def devicename(self):
return "/dev/{}".format(self.name)
@property
def filesystems(self):
self._populate_filesystems()
return self._filesystems
def _load(self, disk_info):
self.name = disk_info['name']
detail = self._client.disk.getinfo(self.name)
self.size = int(disk_info['size'])
self.blocksize = detail['blocksize']
if detail['table'] != 'unknown':
self.partition_table = detail['table']
self.mountpoint = disk_info['mountpoint']
self.model = disk_info['model']
self.type = self._disk_type(disk_info)
for partition_info in disk_info.get('children', []) or []:
self.partitions.append(
Partition(
disk=self,
part_info=partition_info)
)
def _populate_filesystems(self):
"""
        look into all the btrfs filesystems and populate
        the filesystems attribute of the class with the details of
        all the filesystems present on the disk
"""
self._filesystems = []
for fs in (self._client.btrfs.list() or []):
for device in fs['devices']:
if device['path'] == "/dev/{}".format(self.name):
self._filesystems.append(fs)
break
def _disk_type(self, disk_info):
"""
return the type of the disk
"""
if disk_info['rota'] == "1":
if disk_info['type'] == 'rom':
return DiskType.cdrom
            # assume that if a disk is larger than 7TB it's an SMR disk
elif int(disk_info['size']) > (1024 * 1024 * 1024 * 1024 * 7):
return DiskType.archive
else:
return DiskType.hdd
else:
if "nvme" in disk_info['name']:
return DiskType.nvme
else:
return DiskType.ssd
def mktable(self, table_type='gpt', overwrite=False):
"""
create a partition table on the disk
@param table_type: Partition table type as accepted by parted
@param overwrite: erase any existing partition table
"""
if self.partition_table is not None and overwrite is False:
return
self._client.disk.mktable(
disk=self.name,
table_type=table_type
)
def mkpart(self, start, end, part_type="primary"):
"""
@param start: partition start as accepted by parted mkpart
@param end: partition end as accepted by parted mkpart
@param part_type: partition type as accepted by parted mkpart
"""
before = {p.name for p in self.partitions}
self._client.disk.mkpart(
self.name,
start=start,
end=end,
part_type=part_type,
)
after = {}
for disk in self._client.disk.list()['blockdevices']:
if disk['name'] != self.name:
continue
for part in disk.get('children', []):
after[part['name']] = part
        new_names = set(after.keys()) - before
        if not new_names:
            raise RuntimeError("partition was not created on disk {}".format(self.name))
        part_info = after[new_names.pop()]
partition = Partition(
disk=self,
part_info=part_info)
self.partitions.append(partition)
return partition
def __str__(self):
return "Disk <{}>".format(self.name)
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.devicename == other.devicename | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Disk.py | Disk.py |
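# Usage sketch (illustrative; assumes `node` is a connected node SAL object):
# list the disks of a node and rely on the heuristic type classification
# implemented in Disk._disk_type above.
#
#   for disk in node.disks.list():
#       print(disk.devicename, disk.type, disk.size)
#   sda = node.disks.get('sda')   # None when no such disk exists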
from .abstracts import Mountable
import os
import time
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _prepare_device(node, devicename):
logger.debug("prepare device %s", devicename)
ss = devicename.split('/')
if len(ss) < 3:
raise RuntimeError("bad device name: {}".format(devicename))
name = ss[2]
disk = node.disks.get(name)
if disk is None:
raise ValueError("device {} not found".format(name))
node.client.system('parted -s /dev/{} mklabel gpt mkpart primary 1m 100%'.format(name)).get()
now = time.time()
    # check that the new partition is ready and readable
while now + 60 > time.time():
try:
disk = node.disks.get(name)
if len(disk.partitions) > 0:
partition = disk.partitions[0]
resp = node.client.bash('test -b {0} && dd if={0} of=/dev/null bs=4k count=1024'.format(partition.devicename)).get()
if resp.state == 'SUCCESS':
return partition
        except Exception:
time.sleep(1)
continue
else:
raise RuntimeError("Failed to create partition")
class StoragePools:
def __init__(self, node):
self.node = node
self._client = node._client
def list(self):
storagepools = []
btrfs_list = self._client.btrfs.list()
if btrfs_list:
for btrfs in self._client.btrfs.list():
if btrfs['label'].startswith('sp_'):
name = btrfs['label'].split('_', 1)[1]
devicenames = [device['path'] for device in btrfs['devices']]
storagepools.append(StoragePool(self.node, name, devicenames))
return storagepools
def get(self, name):
for pool in self.list():
if pool.name == name:
return pool
raise ValueError("Could not find StoragePool with name {}".format(name))
def create(self, name, devices, metadata_profile, data_profile, overwrite=False):
label = 'sp_{}'.format(name)
logger.debug("create storagepool %s", label)
device_names = []
for device in devices:
part = _prepare_device(self.node, device)
device_names.append(part.devicename)
self._client.btrfs.create(label, device_names, metadata_profile, data_profile, overwrite=overwrite)
pool = StoragePool(self.node, name, device_names)
return pool
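# Usage sketch (device path is hypothetical): create() repartitions every
# device via _prepare_device() before building the btrfs filesystem, so any
# existing data on those devices is destroyed.
#
#   pools = StoragePools(node)
#   pool = pools.create('data', ['/dev/sdb'],
#                       metadata_profile='single', data_profile='single')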
class StoragePool(Mountable):
def __init__(self, node, name, devices):
self.node = node
self._client = node._client
self.devices = devices
self.name = name
self._mountpoint = None
self._ays = None
@property
def devicename(self):
return 'UUID={}'.format(self.uuid)
def mount(self, target=None):
if target is None:
            target = os.path.join('/mnt/storagepools', self.name)
return super().mount(target)
def delete(self, zero=True):
"""
Destroy storage pool
        @param zero: write zeros (nulls) to the first 500MB of each disk in this storagepool
"""
if self.mountpoint:
self.umount()
partitionmap = {}
for disk in self.node.disks.list():
for partition in disk.partitions:
partitionmap[partition.name] = partition
for device in self.devices:
diskpath = os.path.basename(device)
partition = partitionmap.get(diskpath)
if partition:
disk = partition.disk
self._client.disk.rmpart(disk.name, 1)
if zero:
self._client.bash('test -b /dev/{0} && dd if=/dev/zero bs=1M count=500 of=/dev/{0}'.format(diskpath)).get()
return True
return False
@property
def mountpoint(self):
mounts = self.node.list_mounts()
for device in self.devices:
for mount in mounts:
if mount.device == device:
options = mount.options.split(',')
if 'subvol=/' in options:
return mount.mountpoint
def is_device_used(self, device):
"""
check if the device passed as argument is already part of this storagepool
@param device: str e.g: /dev/sda
"""
for d in self.devices:
if d.startswith(device):
return True
return False
def device_add(self, *devices):
to_add = []
for device in devices:
if self.is_device_used(device):
continue
part = _prepare_device(self.node, device)
logger.debug("add device %s to %s", device, self)
to_add.append(part.devicename)
self._client.btrfs.device_add(self._get_mountpoint(), *to_add)
self.devices.extend(to_add)
def device_remove(self, *devices):
self._client.btrfs.device_remove(self._get_mountpoint(), *devices)
for device in devices:
if device in self.devices:
logger.debug("remove device %s to %s", device, self)
self.devices.remove(device)
@property
def fsinfo(self):
if self.mountpoint is None:
raise ValueError("can't get fsinfo if storagepool is not mounted")
return self._client.btrfs.info(self.mountpoint)
@mountpoint.setter
def mountpoint(self, value):
        # do not do anything, the mountpoint is resolved dynamically
return
def _get_mountpoint(self):
mountpoint = self.mountpoint
if not mountpoint:
raise RuntimeError("Can not perform action when filesystem is not mounted")
return mountpoint
@property
def info(self):
for fs in self._client.btrfs.list():
if fs['label'] == 'sp_{}'.format(self.name):
return fs
return None
def raw_list(self):
mountpoint = self._get_mountpoint()
return self._client.btrfs.subvol_list(mountpoint) or []
def get_devices_and_status(self):
device_map = []
disks = self._client.disk.list()['blockdevices']
pool_status = 'healthy'
for device in self.devices:
info = None
for disk in disks:
disk_name = "/dev/%s" % disk['kname']
if device == disk_name and disk['mountpoint']:
info = disk
break
for part in disk.get('children', []) or []:
if device == "/dev/%s" % part['kname']:
info = part
break
if info:
break
status = 'healthy'
            if info and info['subsystems'] != 'block:virtio:pci':
result = self._client.bash("smartctl -H %s > /dev/null ;echo $?" % disk_name).get()
exit_status = int(result.stdout)
if exit_status & 1 << 0:
status = "unknown"
pool_status = 'degraded'
if (exit_status & 1 << 2) or (exit_status & 1 << 3):
status = 'degraded'
pool_status = 'degraded'
device_map.append({
'device': device,
'partUUID': info['partuuid'] or '' if info else '',
'status': status,
})
return device_map, pool_status
def list(self):
subvolumes = []
for subvol in self.raw_list():
path = subvol['Path']
type_, _, name = path.partition('/')
if type_ == 'filesystems':
subvolumes.append(FileSystem(name, self))
return subvolumes
def get(self, name):
"""
Get Filesystem
"""
for filesystem in self.list():
if filesystem.name == name:
return filesystem
raise ValueError("Could not find filesystem with name {}".format(name))
def exists(self, name):
"""
Check if filesystem with name exists
"""
for subvolume in self.list():
if subvolume.name == name:
return True
return False
def create(self, name, quota=None):
"""
Create filesystem
"""
logger.debug("Create filesystem %s on %s", name, self)
mountpoint = self._get_mountpoint()
fspath = os.path.join(mountpoint, 'filesystems')
self._client.filesystem.mkdir(fspath)
subvolpath = os.path.join(fspath, name)
self._client.btrfs.subvol_create(subvolpath)
        if quota:
            # TODO: quota support is not implemented yet
            pass
return FileSystem(name, self)
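    # Usage sketch: filesystems are btrfs subvolumes under
    # <mountpoint>/filesystems/<name>, so the pool must be mounted first.
    # With a pool named 'data' mounted at the default target:
    #
    #   pool.mount()
    #   fs = pool.create('vmstorage')
    #   fs.path  # -> /mnt/storagepools/data/filesystems/vmstorage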
@property
def size(self):
total = 0
fs = self.info
if fs:
for device in fs['devices']:
total += device['size']
return total
@property
def uuid(self):
fs = self.info
if fs:
return fs['uuid']
return None
@property
def used(self):
total = 0
fs = self.info
if fs:
for device in fs['devices']:
total += device['used']
return total
@property
def ays(self):
if self._ays is None:
from zeroos.orchestrator.sal.atyourservice.StoragePool import StoragePoolAys
self._ays = StoragePoolAys(self)
return self._ays
def __repr__(self):
return "StoragePool <{}>".format(self.name)
class FileSystem:
def __init__(self, name, pool):
self.name = name
self.pool = pool
self._client = pool.node.client
self.subvolume = "filesystems/{}".format(name)
self.path = os.path.join(self.pool.mountpoint, self.subvolume)
self.snapshotspath = os.path.join(self.pool.mountpoint, 'snapshots', self.name)
self._ays = None
def delete(self, includesnapshots=True):
"""
Delete filesystem
"""
paths = [fs['Path'] for fs in self._client.btrfs.subvol_list(self.path)]
paths.sort(reverse=True)
for path in paths:
rpath = os.path.join(self.path, os.path.relpath(path, self.subvolume))
self._client.btrfs.subvol_delete(rpath)
self._client.btrfs.subvol_delete(self.path)
if includesnapshots:
for snapshot in self.list():
snapshot.delete()
self._client.filesystem.remove(self.snapshotspath)
def get(self, name):
"""
Get snapshot
"""
for snap in self.list():
if snap.name == name:
return snap
raise ValueError("Could not find snapshot {}".format(name))
def list(self):
"""
List snapshots
"""
snapshots = []
if self._client.filesystem.exists(self.snapshotspath):
for fileentry in self._client.filesystem.list(self.snapshotspath):
if fileentry['is_dir']:
snapshots.append(Snapshot(fileentry['name'], self))
return snapshots
def exists(self, name):
"""
Check if a snapshot exists
"""
        return any(snapshot.name == name for snapshot in self.list())
def create(self, name):
"""
Create snapshot
"""
logger.debug("create snapshot %s on %s", name, self.pool)
snapshot = Snapshot(name, self)
if self.exists(name):
raise RuntimeError("Snapshot path {} exists.")
self._client.filesystem.mkdir(self.snapshotspath)
self._client.btrfs.subvol_snapshot(self.path, snapshot.path)
return snapshot
@property
def ays(self):
if self._ays is None:
            from zeroos.orchestrator.sal.atyourservice.StoragePool import FileSystemAys
self._ays = FileSystemAys(self)
return self._ays
def __repr__(self):
return "FileSystem <{}: {!r}>".format(self.name, self.pool)
class Snapshot:
def __init__(self, name, filesystem):
self.filesystem = filesystem
self._client = filesystem.pool.node.client
self.name = name
self.path = os.path.join(self.filesystem.snapshotspath, name)
self.subvolume = "snapshots/{}/{}".format(self.filesystem.name, name)
def rollback(self):
self.filesystem.delete(False)
self._client.btrfs.subvol_snapshot(self.path, self.filesystem.path)
def delete(self):
self._client.btrfs.subvol_delete(self.path)
def __repr__(self):
return "Snapshot <{}: {!r}>".format(self.name, self.filesystem) | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/StoragePool.py | StoragePool.py |
from zerotier.client import Client
import netaddr
class ZTBootstrap:
def __init__(self, token, bootstap_id, grid_id, cidr):
self.bootstap_nwid = bootstap_id
self.grid_nwid = grid_id
self._cidr = cidr # TODO validate format
# create client and set the authentication header
self._zt = Client()
self._zt.set_auth_header("Bearer " + token)
def configure_routes(self):
for nwid in [self.bootstap_nwid, self.grid_nwid]:
resp = self._zt.network.getNetwork(nwid)
resp.raise_for_status()
nw = resp.json()
nw['config']['routes'] = [{'target': self._cidr, 'via': None}]
self._zt.network.updateNetwork(nw, nwid).raise_for_status()
def list_join_request(self):
"""
        return a list of members that are trying to join the bootstrap network
"""
resp = self._zt.network.listMembers(id=self.bootstap_nwid)
resp.raise_for_status()
requests = []
for member in resp.json():
if not member['online'] or member['config']['authorized']:
continue
requests.append(member)
return requests
def assign_ip(self, nwid, member, ip=None):
"""
Assign an Ip address to a member in a certain network
@nwid : id of the network
@member : member object
        @ip: ip address to assign to the member; if None, take the next free IP in the range
"""
if ip is None:
ip = self._find_free_ip(nwid)
member['config']['authorized'] = True
member['config']['ipAssignments'] = [ip]
resp = self._zt.network.updateMember(member, member['nodeId'], nwid)
resp.raise_for_status()
return ip
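    # Flow sketch: authorize every pending join request on the bootstrap
    # network, handing each member the next free IP from the configured range.
    #
    #   for member in zt.list_join_request():
    #       ip = zt.assign_ip(zt.bootstap_nwid, member)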
def unauthorize_member(self, nwid, member):
member['config']['authorized'] = False
member['config']['ipAssignments'] = []
resp = self._zt.network.updateMember(member, member['nodeId'], nwid)
resp.raise_for_status()
def _find_free_ip(self, nwid):
resp = self._zt.network.listMembers(nwid)
resp.raise_for_status()
        # skip the network and broadcast addresses of the range
        all_ips = list(netaddr.IPNetwork(self._cidr))[1:-1]
for member in resp.json():
for addr in member['config']['ipAssignments']:
all_ips.remove(netaddr.IPAddress(addr))
if len(all_ips) <= 0:
raise RuntimeError("No more free ip in the range %s" % self._cidr)
return str(all_ips[0])
if __name__ == '__main__':
    # example values: replace with your own ZeroTier API token and network ids
    token = '4gE9Cfqw2vFFzCPC1BYaj2mbSpNScxJx'
bootstap_nwid = '17d709436c993670'
grid_nwid = 'a09acf02336ce8b5'
zt = ZTBootstrap(token, bootstap_nwid, grid_nwid, '192.168.10.0/24')
    from IPython import embed; embed()
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/ZerotierBootstrap.py | ZerotierBootstrap.py
import json
from io import BytesIO
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Containers:
def __init__(self, node):
self.node = node
def list(self):
containers = []
for container in self.node.client.container.list().values():
try:
containers.append(Container.from_containerinfo(container, self.node))
except ValueError:
                # skip containers without tags
pass
return containers
def get(self, name):
containers = list(self.node.client.container.find(name).values())
if not containers:
raise LookupError("Could not find container with name {}".format(name))
if len(containers) > 1:
raise LookupError("Found more than one containter with name {}".format(name))
return Container.from_containerinfo(containers[0], self.node)
def create(self, name, flist, hostname=None, mounts=None, nics=None,
host_network=False, ports=None, storage=None, init_processes=None, privileged=False):
logger.debug("create container %s", name)
container = Container(name, self.node, flist, hostname, mounts, nics,
host_network, ports, storage, init_processes, privileged)
container.start()
return container
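# Usage sketch (flist URL is hypothetical): create() registers and starts the
# container in one call; list()/get() later rebuild Container objects from the
# tags stored in the container arguments.
#
#   container = node.containers.create(
#       'web', 'https://hub.example.com/flists/nginx.flist', ports={8080: 80})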
class Container:
"""G8SO Container"""
def __init__(self, name, node, flist, hostname=None, mounts=None, nics=None,
host_network=False, ports=None, storage=None, init_processes=None,
privileged=False, identity=None):
"""
TODO: write doc string
filesystems: dict {filesystemObj: target}
"""
self.name = name
self.node = node
self.mounts = mounts or {}
self.hostname = hostname
self.flist = flist
self.ports = ports or {}
self.nics = nics or []
self.host_network = host_network
self.storage = storage
self.init_processes = init_processes or []
self._client = None
self.privileged = privileged
self.identity = identity
self._ays = None
for nic in self.nics:
nic.pop('token', None)
if nic.get('config', {}).get('gateway', ''):
nic['monitor'] = True
@classmethod
def from_containerinfo(cls, containerinfo, node):
logger.debug("create container from info")
arguments = containerinfo['container']['arguments']
        if not arguments['tags']:
            # we don't deal with tagless containers
            raise ValueError("Could not load containerinfo without tags")
        # pass keyword arguments: the stored arguments carry no init_processes,
        # so purely positional passing would misalign privileged and identity
        return cls(arguments['tags'][0],
                   node,
                   arguments['root'],
                   hostname=arguments['hostname'],
                   mounts=arguments['mount'],
                   nics=arguments['nics'],
                   host_network=arguments['host_network'],
                   ports=arguments['port'],
                   storage=arguments['storage'],
                   privileged=arguments['privileged'],
                   identity=arguments['identity'])
@classmethod
def from_ays(cls, service, password=None):
logger.debug("create container from service (%s)", service)
from .Node import Node
node = Node.from_ays(service.parent, password)
ports = {}
for portmap in service.model.data.ports:
source, dest = portmap.split(':')
ports[int(source)] = int(dest)
nics = [nic.to_dict() for nic in service.model.data.nics]
mounts = {}
for mount in service.model.data.mounts:
fs_service = service.aysrepo.serviceGet('filesystem', mount.filesystem)
try:
sp = node.storagepools.get(fs_service.parent.name)
fs = sp.get(fs_service.name)
except KeyError:
continue
mounts[fs.path] = mount.target
container = cls(
name=service.name,
node=node,
mounts=mounts,
nics=nics,
hostname=service.model.data.hostname,
flist=service.model.data.flist,
ports=ports,
host_network=service.model.data.hostNetworking,
storage=service.model.data.storage,
init_processes=[p.to_dict() for p in service.model.data.initProcesses],
privileged=service.model.data.privileged,
identity=service.model.data.identity,
)
return container
@property
def id(self):
logger.debug("get container id")
info = self.info
if info:
return info['container']['id']
return
@property
def info(self):
logger.debug("get container info")
for containerid, container in self.node.client.container.list().items():
if self.name in (container['container']['arguments']['tags'] or []):
container['container']['id'] = int(containerid)
return container
return
@property
def client(self):
if self._client is None:
self._client = self.node.client.container.client(self.id)
return self._client
def upload_content(self, remote, content):
if isinstance(content, str):
content = content.encode('utf8')
bytes = BytesIO(content)
self.client.filesystem.upload(remote, bytes)
def download_content(self, remote):
buff = BytesIO()
self.client.filesystem.download(remote, buff)
return buff.getvalue().decode()
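    # Round-trip sketch: upload_content()/download_content() move small blobs
    # through the container filesystem API.
    #
    #   container.upload_content('/tmp/motd', 'hello')
    #   container.download_content('/tmp/motd')  # -> 'hello'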
def _create_container(self, timeout=60):
logger.debug("send create container command to g8os")
tags = [self.name]
if self.hostname and self.hostname != self.name:
tags.append(self.hostname)
job = self.node.client.container.create(
root_url=self.flist,
mount=self.mounts,
host_network=self.host_network,
nics=self.nics,
port=self.ports,
tags=tags,
hostname=self.hostname,
storage=self.storage,
privileged=self.privileged,
identity=self.identity,
)
containerid = job.get(timeout)
self._client = self.node.client.container.client(containerid)
def is_job_running(self, cmd):
try:
for job in self._client.job.list():
arguments = job['cmd']['arguments']
if 'name' in arguments and arguments['name'] == cmd:
return job
return False
except Exception as err:
            if "invalid container id" in str(err):
                return False
raise
def is_port_listening(self, port, timeout=60):
import time
start = time.time()
while start + timeout > time.time():
if port not in self.node.freeports(port, nrports=3):
return True
time.sleep(0.2)
return False
def start(self):
if not self.is_running():
logger.debug("start %s", self)
self._create_container()
for process in self.init_processes:
cmd = "{} {}".format(process['name'], ' '.join(process.get('args', [])))
pwd = process.get('pwd', '')
stdin = process.get('stdin', '')
env = {}
for x in process.get('environment', []):
                    k, v = x.split("=", 1)
env[k] = v
self.client.system(command=cmd, dir=pwd, stdin=stdin, env=env)
def stop(self):
if not self.is_running():
return
logger.debug("stop %s", self)
self.node.client.container.terminate(self.id)
self._client = None
def is_running(self):
return self.id is not None
@property
def ays(self):
if self._ays is None:
from JumpScale.sal.g8os.atyourservice.StorageCluster import ContainerAYS
self._ays = ContainerAYS(self)
return self._ays
def __str__(self):
return "Container <{}>".format(self.name)
def __repr__(self):
        return str(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Container.py | Container.py
from js9 import j
import os
from zeroos.core0.client.client import Timeout
import json
import hashlib
class HealthCheckObject:
def __init__(self, id, name, category, resource):
self.id = id
self.name = name
self.category = category
self._messages = []
self.resource = resource
self.stacktrace = ''
def add_message(self, id, status, text):
self._messages.append({'id': id, 'text': text, 'status': status})
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'category': self.category,
'resource': self.resource,
'stacktrace': self.stacktrace or '',
'messages': self._messages
}
class HealthCheckRun(HealthCheckObject):
def start(self, *args, **kwargs):
try:
self.run(*args, **kwargs)
except Exception as e:
eco = j.errorhandler.parsePythonExceptionObject(e)
self.stacktrace = eco.traceback
return self.to_dict()
class IPMIHealthCheck(HealthCheckRun):
def execute_ipmi(self, container, cmd):
if self.node.client.filesystem.exists("/dev/ipmi") or self.node.client.filesystem.exists("/dev/ipmi0"):
return container.client.system(cmd).get().stdout
return ''
class ContainerContext:
def __init__(self, node, flist):
self.node = node
self.flist = flist
self.container = None
self._name = 'healthcheck_{}'.format(hashlib.md5(flist.encode()).hexdigest())
def __enter__(self):
try:
self.container = self.node.containers.get(self._name)
except LookupError:
self.container = self.node.containers.create(self._name, self.flist, host_network=True, privileged=True)
return self.container
def __exit__(self, exc_type, exc_val, exc_tb):
return
class HealthCheck:
def __init__(self, node):
self.node = node
        self.healthcheckfolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'healthchecks')
def with_container(self, flist):
return ContainerContext(self.node, flist)
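    # Usage sketch (flist URL and check name are hypothetical): with_container()
    # yields a privileged, host-networked container that the checks run inside.
    #
    #   with healthcheck.with_container('https://hub.example.com/hc.flist') as c:
    #       result = healthcheck.run(c, 'temperature', timeout=30)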
def run(self, container, name, timeout=None):
try:
            healthcheckfile = os.path.join(self.healthcheckfolder, name + '.py')
            if not os.path.exists(healthcheckfile):
                raise RuntimeError("Healthcheck with name {} not found".format(name))
container.client.filesystem.upload_file('/tmp/{}.py'.format(name), healthcheckfile)
try:
job = container.client.bash('python3 /tmp/{}.py'.format(name))
response = job.get(timeout)
except Timeout:
container.client.job.kill(job.id, 9)
raise RuntimeError("Failed to execute {} on time".format(name))
if response.state == 'ERROR':
raise RuntimeError("Failed to execute {} {}".format(name, response.stdout))
try:
return json.loads(response.stdout)
except Exception:
raise RuntimeError("Failed to parse response of {}".format(name))
except Exception as e:
            healthcheck = {
                'id': name,
                'status': 'ERROR',
                'message': str(e)
            }
            return healthcheck
def cpu_mem(self):
from .healthchecks.cpu_mem_core import CPU, Memory
cpu = CPU(self.node)
memory = Memory(self.node)
return [cpu.start(), memory.start()]
def disk_usage(self):
from .healthchecks.diskusage import DiskUsage
usage = DiskUsage(self.node)
return usage.start()
def network_bond(self):
from .healthchecks.networkbond import NetworkBond
bond = NetworkBond(self.node)
return bond.start()
def node_temperature(self, container):
from .healthchecks.temperature import Temperature
temperature = Temperature(self.node)
result = temperature.start(container)
return result
def network_stability(self, nodes):
from .healthchecks.networkstability import NetworkStability
stability = NetworkStability(self.node)
return stability.start(nodes)
def rotate_logs(self):
from .healthchecks.log_rotator import RotateLogs
rotator = RotateLogs(self.node)
return rotator.start()
def openfiledescriptors(self):
from .healthchecks.openfiledescriptors import OpenFileDescriptor
ofd = OpenFileDescriptor(self.node)
return ofd.start()
def interrupts(self):
from .healthchecks.interrupts import Interrupts
inter = Interrupts(self.node)
return inter.start()
def threads(self):
from .healthchecks.threads import Threads
thread = Threads(self.node)
return thread.start()
def ssh_cleanup(self, job):
from .healthchecks.ssh_cleanup import SSHCleanup
cleaner = SSHCleanup(self.node, job)
return cleaner.start()
def powersupply(self, container):
from .healthchecks.powersupply import PowerSupply
powersupply = PowerSupply(self.node)
return powersupply.start(container)
def fan(self, container):
from .healthchecks.fan import Fan
fan = Fan(self.node)
return fan.start(container)
def context_switch(self):
from .healthchecks.context_switch import ContextSwitch
return ContextSwitch(self.node).start()
def network_load(self):
from .healthchecks.networkload import NetworkLoad
load = NetworkLoad(self.node)
        return load.start()
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthcheck.py | healthcheck.py
from zeroos.core0.client import Client
from .Disk import Disks, DiskType
from .Container import Containers
from .StoragePool import StoragePools
from .Network import Network
from .healthcheck import HealthCheck
from collections import namedtuple
from datetime import datetime
from io import BytesIO
import netaddr
Mount = namedtuple('Mount', ['device', 'mountpoint', 'fstype', 'options'])
class Node:
"""Represent a G8OS Server"""
def __init__(self, addr, port=6379, password=None, timeout=120):
# g8os client to talk to the node
self._client = Client(host=addr, port=port, password=password, timeout=timeout)
self._storageAddr = None
self.addr = addr
self.port = port
self.disks = Disks(self)
self.storagepools = StoragePools(self)
self.containers = Containers(self)
self.network = Network(self)
self.healthcheck = HealthCheck(self)
@classmethod
def from_ays(cls, service, password=None, timeout=120):
return cls(
addr=service.model.data.redisAddr,
port=service.model.data.redisPort,
password=password,
timeout=timeout
)
@property
def client(self):
return self._client
@property
def name(self):
def get_nic_hwaddr(nics, name):
for nic in nics:
if nic['name'] == name:
return nic['hardwareaddr']
defaultgwdev = self.client.bash("ip route | grep default | awk '{print $5}'").get().stdout.strip()
nics = self.client.info.nic()
macgwdev = None
if defaultgwdev:
macgwdev = get_nic_hwaddr(nics, defaultgwdev)
if not macgwdev:
raise AttributeError("name not find for node {}".format(self))
return macgwdev.replace(":", '')
@property
def storageAddr(self):
if not self._storageAddr:
nic_data = self.client.info.nic()
for nic in nic_data:
if nic['name'] == 'backplane':
for ip in nic['addrs']:
network = netaddr.IPNetwork(ip['addr'])
if network.version == 4:
self._storageAddr = network.ip.format()
return self._storageAddr
self._storageAddr = self.addr
return self._storageAddr
def get_nic_by_ip(self, addr):
try:
res = next(nic for nic in self._client.info.nic() if any(addr == a['addr'].split('/')[0] for a in nic['addrs']))
return res
except StopIteration:
return None
def _eligible_fscache_disk(self, disks):
"""
        return the first disk that is eligible to be used as filesystem cache.
        First try to find an SSD disk, otherwise fall back to an HDD
"""
priorities = [DiskType.ssd, DiskType.hdd, DiskType.nvme]
eligible = {t: [] for t in priorities}
# Pick up the first ssd
usedisks = []
for pool in (self._client.btrfs.list() or []):
for device in pool['devices']:
usedisks.append(device['path'])
for disk in disks[::-1]:
if disk.devicename in usedisks or len(disk.partitions) > 0:
continue
if disk.type in priorities:
eligible[disk.type].append(disk)
# pick up the first disk according to priorities
for t in priorities:
if eligible[t]:
return eligible[t][0]
else:
raise RuntimeError("cannot find eligible disks for the fs cache")
def _mount_fscache(self, storagepool):
"""
        mount the fscache storage pool and copy the content of the in-memory fs inside
"""
mountedpaths = [mount.mountpoint for mount in self.list_mounts()]
containerpath = '/var/cache/containers'
if containerpath not in mountedpaths:
if storagepool.exists('containercache'):
storagepool.get('containercache').delete()
fs = storagepool.create('containercache')
self.client.disk.mount(storagepool.devicename, containerpath, ['subvol={}'.format(fs.subvolume)])
logpath = '/var/log'
if logpath not in mountedpaths:
# logs is empty filesystem which we create a snapshot on to store logs of current boot
snapname = '{:%Y-%m-%d-%H-%M}'.format(datetime.now())
fs = storagepool.get('logs')
snapshot = fs.create(snapname)
            self.client.bash('mkdir /tmp/log && mv /var/log/* /tmp/log/').get()
self.client.disk.mount(storagepool.devicename, logpath, ['subvol={}'.format(snapshot.subvolume)])
self.client.bash('mv /tmp/log/* /var/log/').get()
self.client.logger.reopen()
# startup syslogd and klogd
self.client.system('syslogd -n -O /var/log/messages')
self.client.system('klogd -n')
def freeports(self, baseport=2000, nrports=3):
ports = self.client.info.port()
usedports = set()
for portInfo in ports:
if portInfo['network'] != "tcp":
continue
usedports.add(portInfo['port'])
freeports = []
while True:
if baseport not in usedports:
freeports.append(baseport)
if len(freeports) >= nrports:
return freeports
baseport += 1
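    # Example: scan upward from 2000 and return the first three TCP ports
    # nothing currently listens on.
    #
    #   node.freeports(baseport=2000, nrports=3)  # -> e.g. [2000, 2001, 2003] if 2002 is in use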
def find_persistance(self, name='fscache'):
fscache_sp = None
for sp in self.storagepools.list():
if sp.name == name:
fscache_sp = sp
break
return fscache_sp
def ensure_persistance(self, name='fscache'):
"""
look for a disk not used,
create a partition and mount it to be used as cache for the g8ufs
set the label `fs_cache` to the partition
"""
disks = self.disks.list()
if len(disks) <= 0:
# if no disks, we can't do anything
return
# check if there is already a storage pool with the fs_cache label
fscache_sp = self.find_persistance(name)
# create the storage pool if we don't have one yet
if fscache_sp is None:
disk = self._eligible_fscache_disk(disks)
fscache_sp = self.storagepools.create(name, devices=[disk.devicename], metadata_profile='single', data_profile='single', overwrite=True)
fscache_sp.mount()
try:
fscache_sp.get('logs')
except ValueError:
fscache_sp.create('logs')
# mount the storage pool
self._mount_fscache(fscache_sp)
return fscache_sp
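    # Boot-time sketch: idempotently ensure the 'fscache' pool exists. On an
    # empty node this picks an unused SSD/HDD via _eligible_fscache_disk(),
    # creates the btrfs pool, and mounts it for the container cache and /var/log.
    #
    #   sp = node.ensure_persistance()  # reuses the existing pool on later boots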
def download_content(self, remote):
buff = BytesIO()
self.client.filesystem.download(remote, buff)
return buff.getvalue().decode()
def upload_content(self, remote, content):
if isinstance(content, str):
content = content.encode('utf8')
bytes = BytesIO(content)
self.client.filesystem.upload(remote, bytes)
def wipedisks(self):
print('Wiping node {hostname}'.format(**self.client.info.os()))
mounteddevices = {mount['device']: mount for mount in self.client.info.disk()}
def getmountpoint(device):
for mounteddevice, mount in mounteddevices.items():
if mounteddevice.startswith(device):
return mount
jobs = []
for disk in self.client.disk.list()['blockdevices']:
devicename = '/dev/{}'.format(disk['kname'])
mount = getmountpoint(devicename)
if not mount:
print(' * Wiping disk {kname}'.format(**disk))
jobs.append(self.client.system('dd if=/dev/zero of={} bs=1M count=50'.format(devicename)))
else:
print(' * Not wiping {device} mounted at {mountpoint}'.format(device=devicename, mountpoint=mount['mountpoint']))
# wait for wiping to complete
for job in jobs:
job.get()
def list_mounts(self):
allmounts = []
for mount in self.client.info.disk():
allmounts.append(Mount(mount['device'],
mount['mountpoint'],
mount['fstype'],
mount['opts']))
return allmounts
def __str__(self):
return "Node <{host}:{port}>".format(
host=self.addr,
port=self.port,
)
def __repr__(self):
return str(self)
def __eq__(self, other):
a = "{}:{}".format(self.addr, self.port)
b = "{}:{}".format(other.addr, other.port)
return a == b
def __hash__(self):
        return hash((self.addr, self.port))
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Node.py | Node.py
import ssl
import json
import aioredis
import sys
import uuid
import time
import logging
import asyncio
logger = logging.getLogger('g8core')
class Response:
def __init__(self, client, id):
self._client = client
self._id = id
self._queue = 'result:{}'.format(id)
async def exists(self):
r = self._client._redis
flag = '{}:flag'.format(self._queue)
key_exists = await r.connection.execute('LKEYEXISTS', flag)
return bool(key_exists)
async def get(self, timeout=None):
if timeout is None:
timeout = self._client.timeout
r = self._client._redis
start = time.time()
maxwait = timeout
while maxwait > 0:
job_exists = await self.exists()
if not job_exists:
raise RuntimeError("Job not found: %s" % self.id)
v = await r.brpoplpush(self._queue, self._queue, min(maxwait, 10))
if v is not None:
return json.loads(v.decode())
logger.debug('%s still waiting (%ss)', self._id, int(time.time() - start))
maxwait -= 10
raise TimeoutError()
class Pubsub:
def __init__(self, loop, host, port=6379, password="", db=0, ctx=None, timeout=None, testConnectionAttempts=3, callback=None):
socket_timeout = (timeout + 5) if timeout else 15
self.testConnectionAttempts = testConnectionAttempts
self._redis = None
self.host = host
self.port = port
self.password = password
self.db = db
if ctx is None:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.ssl = ctx
self.timeout = socket_timeout
self.loop = loop
async def default_callback(job_id, level, line, meta):
w = sys.stdout if level == 1 else sys.stderr
w.write(line)
w.write('\n')
self.callback = callback or default_callback
if not callable(self.callback):
raise Exception('callback must be callable')
async def get(self):
if self._redis is not None:
return self._redis
self._redis = await aioredis.create_redis((self.host, self.port),
loop=self.loop,
password=self.password,
db=self.db,
ssl=self.ssl,
timeout=self.timeout)
return self._redis
async def global_stream(self, queue, timeout=120):
        if self._redis is None or self._redis.connection.closed:
            self._redis = await self.get()
data = await asyncio.wait_for(self._redis.blpop(queue, timeout=timeout), timeout=timeout)
_, body = data
payload = json.loads(body.decode())
message = payload['message']
line = message['message']
meta = message['meta']
job_id = payload['command']
await self.callback(job_id, meta >> 16, line, meta & 0xff)
async def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None):
if not id:
id = str(uuid.uuid4())
payload = {
'id': id,
'command': command,
'arguments': arguments,
'queue': queue,
'max_time': max_time,
'stream': stream,
'tags': tags,
}
self._redis = await self.get()
flag = 'result:{}:flag'.format(id)
await self._redis.rpush('core:default', json.dumps(payload))
if await self._redis.brpoplpush(flag, flag, 10) is None:
raise TimeoutError('failed to queue job {}'.format(id))
logger.debug('%s >> g8core.%s(%s)', id, command, ', '.join(("%s=%s" % (k, v) for k, v in arguments.items())))
return Response(self, id)
async def sync(self, command, args):
response = await self.raw(command, args)
result = await response.get()
if result["state"] != 'SUCCESS':
raise RuntimeError('invalid response: %s' % result["state"])
return json.loads(result["data"])
async def ping(self):
response = await self.sync('core.ping', {})
return response
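    # Usage sketch (host and password are hypothetical): every method is a
    # coroutine bound to the event loop passed at construction.
    #
    #   loop = asyncio.get_event_loop()
    #   ps = Pubsub(loop, '10.0.0.1', password='secret')
    #   loop.run_until_complete(ps.ping())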
async def subscribe(self, queue=None):
response = await self.sync('logger.subscribe', {'queue': queue})
        return response
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Pubsub.py | Pubsub.py
import io
import time
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class StorageEngine:
"""storageEngine server"""
def __init__(self, name, container, bind='0.0.0.0:16379', data_dir='/mnt/data', master=None):
"""
TODO: write doc string
"""
self.name = name
self.master = master
self.container = container
self.bind = bind
self.port = int(bind.split(':')[1])
        self.data_dir = data_dir
self._ays = None
@classmethod
def from_ays(cls, service, password=None):
logger.debug("create storageEngine from service (%s)", service)
from .Container import Container
container = Container.from_ays(service.parent, password)
if service.model.data.master != '':
master_service = service.aysrepo.serviceGet('storage_engine', service.model.data.master)
master = StorageEngine.from_ays(master_service, password)
else:
master = None
return cls(
name=service.name,
container=container,
bind=service.model.data.bind,
data_dir=service.model.data.homeDir,
master=master,
)
def _configure(self):
logger.debug("configure storageEngine")
buff = io.BytesIO()
self.container.client.filesystem.download('/etc/ardb.conf', buff)
content = buff.getvalue().decode()
# update config
content = content.replace('/mnt/data', self.data_dir)
content = content.replace('0.0.0.0:16379', self.bind)
mgmt_bind = "%s:%s" % (self.container.node.addr, self.port)
if self.bind != mgmt_bind:
content += "server[1].listen %s\n" % mgmt_bind
if self.master is not None:
_, port = self.master.bind.split(":")
content = content.replace('#slaveof 127.0.0.1:6379', 'slaveof {}:{}'.format(self.master.container.node.addr, port))
# make sure home directory exists
self.container.client.filesystem.mkdir(self.data_dir)
# upload new config
self.container.client.filesystem.upload('/etc/ardb.conf.used', io.BytesIO(initial_bytes=content.encode()))
def start(self, timeout=100):
if not self.container.is_running():
self.container.start()
running, _ = self.is_running()
if running:
return
logger.debug('start %s', self)
self._configure()
self.container.client.system('/bin/ardb-server /etc/ardb.conf.used', id="{}.{}".format("storage_engine", self.name))
# wait for storageEngine to start
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while not is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if not is_running:
raise RuntimeError("storage server {} didn't started".format(self.name))
def stop(self, timeout=30):
if not self.container.is_running():
return
is_running, job = self.is_running()
if not is_running:
return
logger.debug('stop %s', self)
self.container.client.job.kill(job['cmd']['id'])
# wait for StorageEngine to stop
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if is_running:
raise RuntimeError("storage server {} didn't stopped")
def is_healthy(self):
import redis
client = redis.Redis(self.container.node.addr, self.port)
key = "keytest"
value = b"some test value"
if not client.set(key, value):
return False
result = client.get(key)
if result != value:
return False
client.delete(key)
if client.exists(key):
return False
return True
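    # Health probe sketch: is_healthy() runs a full SET/GET/DELETE cycle
    # against the ARDB server's redis-compatible endpoint.
    #
    #   engine = StorageEngine('ardb1', container)
    #   engine.start()
    #   engine.is_healthy()  # -> True when the round-trip succeeds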
def is_running(self):
try:
if self.port not in self.container.node.freeports(self.port, 1):
for job in self.container.client.job.list():
if 'name' in job['cmd']['arguments'] and job['cmd']['arguments']['name'] == '/bin/ardb-server':
return (True, job)
return (False, None)
except Exception as err:
            if "invalid container id" in str(err):
                return (False, None)
raise
@property
def ays(self):
if self._ays is None:
from JumpScale.sal.g8os.atyourservice.StorageCluster import storageEngineAys
self._ays = storageEngineAys(self)
return self._ays
def __str__(self):
return "storageEngine <{}>".format(self.name)
def __repr__(self):
        return str(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/StorageEngine.py | StorageEngine.py
Dataset Card for "pypi_clean"
All of the latest package versions from pypi. The original data came from here. I pulled the latest versions of each package, then extracted only md, rst, ipynb, and py files.
I then applied some cleaning:
- rendering notebooks
- removing leading comments/licenses
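A minimal loading sketch (the hub id and column names below are inferred from this preview and may differ):

    from datasets import load_dataset

    ds = load_dataset("pypi_clean", split="train")
    print(ds[0]["package"], ds[0]["path"], ds[0]["filename"])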