code | package | path | filename
---|---|---|---|
from enum import Enum
class EnumZerotierType(Enum):
public = "public"
private = "private"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumZerotierType.py | EnumZerotierType.py |
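These string-valued enums round-trip between Python and the JSON wire format: the Enum constructor looks a member up by its string value, and .value recovers the string. A minimal sketch, assuming the 0-orchestrator package is installed:

from zeroos.orchestrator.client import EnumZerotierType

# Look up a member from a wire value, e.g. while parsing an API response.
zt_type = EnumZerotierType("private")
assert zt_type is EnumZerotierType.private

# .value recovers the string to send back to the API.
assert zt_type.value == "private"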
from enum import Enum
class EnumContainerListItemStatus(Enum):
running = "running"
halted = "halted"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumContainerListItemStatus.py | EnumContainerListItemStatus.py |
from enum import Enum
class EnumVdiskListItemType(Enum):
boot = "boot"
db = "db"
cache = "cache"
tmp = "tmp"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumVdiskListItemType.py | EnumVdiskListItemType.py |
"""
Auto-generated class for VdiskRollback
"""
from . import client_support
class VdiskRollback(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(epoch):
"""
:type epoch: int
:rtype: VdiskRollback
"""
return VdiskRollback(
epoch=epoch,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'VdiskRollback'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'epoch'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.epoch = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/VdiskRollback.py | VdiskRollback.py |
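Every generated class follows the same contract: a typed create() helper for building request payloads, a constructor that accepts a decoded JSON dict, and a ValueError when a required property is missing. A minimal usage sketch, assuming the package is installed:

from zeroos.orchestrator.client import VdiskRollback

# Typed construction via the static helper.
rb = VdiskRollback.create(epoch=1500000000)
print(rb.as_json(indent=2))          # request-body serialization

# Construction straight from decoded JSON, as done for API responses.
rb2 = VdiskRollback(json={"epoch": 1500000000})
assert rb2.epoch == 1500000000

# Required properties are enforced at construction time.
try:
    VdiskRollback(json={"unrelated": 1})
except ValueError as err:
    print(err)   # VdiskRollback: missing required property epoch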
from enum import Enum
class EnumDiskInfoType(Enum):
ssd = "ssd"
nvme = "nvme"
hdd = "hdd"
archive = "archive"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumDiskInfoType.py | EnumDiskInfoType.py |
"""
Auto-generated class for OSInfo
"""
from . import client_support
class OSInfo(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(bootTime, hostname, os, platform, platformFamily, platformVersion, procs, uptime, virtualizationRole, virtualizationSystem):
"""
:type bootTime: int
:type hostname: str
:type os: str
:type platform: str
:type platformFamily: str
:type platformVersion: str
:type procs: int
:type uptime: int
:type virtualizationRole: str
:type virtualizationSystem: str
:rtype: OSInfo
"""
return OSInfo(
bootTime=bootTime,
hostname=hostname,
os=os,
platform=platform,
platformFamily=platformFamily,
platformVersion=platformVersion,
procs=procs,
uptime=uptime,
virtualizationRole=virtualizationRole,
virtualizationSystem=virtualizationSystem,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'OSInfo'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'bootTime'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.bootTime = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'hostname'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.hostname = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'os'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.os = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'platform'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.platform = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'platformFamily'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.platformFamily = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'platformVersion'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.platformVersion = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'procs'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.procs = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'uptime'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.uptime = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'virtualizationRole'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.virtualizationRole = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'virtualizationSystem'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.virtualizationSystem = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/OSInfo.py | OSInfo.py |
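as_dict() (via client_support.to_dict) gives a plain dict. Assuming its keys mirror the JSON property names, which the symmetric to_json/constructor pair suggests but client_support itself is not shown here, the dict can rebuild an equivalent object. A sketch with illustrative values:

from zeroos.orchestrator.client import OSInfo

info = OSInfo.create(
    bootTime=1500000000, hostname="node-01", os="linux",
    platform="zero-os", platformFamily="linux", platformVersion="1.1.0a7",
    procs=42, uptime=3600,
    virtualizationRole="host", virtualizationSystem="kvm",
)
# Assumption: to_dict emits the same property names the constructor reads.
clone = OSInfo(json=info.as_dict())
assert clone.hostname == "node-01"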
"""
Auto-generated class for Process
"""
from .CPUStats import CPUStats
from . import client_support
class Process(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(cmdline, cpu, pid, rss, swap, vms):
"""
:type cmdline: str
:type cpu: CPUStats
:type pid: int
:type rss: int
:type swap: int
:type vms: int
:rtype: Process
"""
return Process(
cmdline=cmdline,
cpu=cpu,
pid=pid,
rss=rss,
swap=swap,
vms=vms,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Process'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'cmdline'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.cmdline = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'cpu'
val = data.get(property_name)
if val is not None:
datatypes = [CPUStats]
try:
self.cpu = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'pid'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.pid = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'rss'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.rss = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'swap'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.swap = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'vms'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.vms = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/Process.py | Process.py |
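Process nests a CPUStats value (defined later in this package) in its cpu field, so the inner object is built first. A sketch with illustrative numbers; whether val_factory would also coerce a raw dict into CPUStats depends on client_support, which is not shown:

from zeroos.orchestrator.client import CPUStats, Process

cpu = CPUStats.create(
    guestNice=0.0, idle=97.5, ioWait=0.1, irq=0.0, nice=0.0,
    softIrq=0.0, steal=0.0, stolen=0.0, system=0.8, user=1.6,
)
proc = Process.create(cmdline="/sbin/init", cpu=cpu, pid=1,
                      rss=4096, swap=0, vms=16384)
print(proc.as_json(indent=4))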
"""
Auto-generated class for MemInfo
"""
from . import client_support
class MemInfo(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(active, available, buffers, cached, free, inactive, total, used, usedPercent, wired):
"""
:type active: int
:type available: int
:type buffers: int
:type cached: int
:type free: int
:type inactive: int
:type total: int
:type used: int
:type usedPercent: float
:type wired: int
:rtype: MemInfo
"""
return MemInfo(
active=active,
available=available,
buffers=buffers,
cached=cached,
free=free,
inactive=inactive,
total=total,
used=used,
usedPercent=usedPercent,
wired=wired,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'MemInfo'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'active'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.active = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'available'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.available = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'buffers'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.buffers = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'cached'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.cached = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'free'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.free = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'inactive'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.inactive = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'total'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.total = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'used'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.used = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'usedPercent'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.usedPercent = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'wired'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.wired = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/MemInfo.py | MemInfo.py |
from enum import Enum
class EnumVdiskStatus(Enum):
running = "running"
halted = "halted"
rollingback = "rollingback"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumVdiskStatus.py | EnumVdiskStatus.py |
from enum import Enum
class EnumStorageServerStatus(Enum):
ready = "ready"
error = "error"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumStorageServerStatus.py | EnumStorageServerStatus.py |
"""
Auto-generated class for VMCreate
"""
from .NicLink import NicLink
from .VDiskLink import VDiskLink
from . import client_support
class VMCreate(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(cpu, disks, id, memory, nics):
"""
:type cpu: int
:type disks: list[VDiskLink]
:type id: str
:type memory: int
:type nics: list[NicLink]
:rtype: VMCreate
"""
return VMCreate(
cpu=cpu,
disks=disks,
id=id,
memory=memory,
nics=nics,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'VMCreate'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'cpu'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.cpu = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'disks'
val = data.get(property_name)
if val is not None:
datatypes = [VDiskLink]
try:
self.disks = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'id'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.id = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'memory'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.memory = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nics'
val = data.get(property_name)
if val is not None:
datatypes = [NicLink]
try:
self.nics = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/VMCreate.py | VMCreate.py |
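List-typed fields (disks, nics) go through client_support.list_factory rather than val_factory, validating each element against the given datatype. Empty lists keep the sketch below self-contained (assuming list_factory accepts them); a real call would pass VDiskLink and NicLink objects built from their own schemas:

from zeroos.orchestrator.client import VMCreate

vm = VMCreate.create(cpu=2, disks=[], id="vm-01", memory=2048, nics=[])
print(vm.as_json())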
"""
Auto-generated class for Zerotier
"""
from .EnumZerotierType import EnumZerotierType
from .ZerotierRoute import ZerotierRoute
from . import client_support
class Zerotier(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(allowDefault, allowGlobal, allowManaged, assignedAddresses, bridge, broadcastEnabled, dhcp, mac, mtu, name, netconfRevision, nwid, portDeviceName, portError, routes, status, type):
"""
:type allowDefault: bool
:type allowGlobal: bool
:type allowManaged: bool
:type assignedAddresses: list[str]
:type bridge: bool
:type broadcastEnabled: bool
:type dhcp: bool
:type mac: str
:type mtu: int
:type name: str
:type netconfRevision: int
:type nwid: str
:type portDeviceName: str
:type portError: int
:type routes: list[ZerotierRoute]
:type status: str
:type type: EnumZerotierType
:rtype: Zerotier
"""
return Zerotier(
allowDefault=allowDefault,
allowGlobal=allowGlobal,
allowManaged=allowManaged,
assignedAddresses=assignedAddresses,
bridge=bridge,
broadcastEnabled=broadcastEnabled,
dhcp=dhcp,
mac=mac,
mtu=mtu,
name=name,
netconfRevision=netconfRevision,
nwid=nwid,
portDeviceName=portDeviceName,
portError=portError,
routes=routes,
status=status,
type=type,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Zerotier'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'allowDefault'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.allowDefault = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'allowGlobal'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.allowGlobal = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'allowManaged'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.allowManaged = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'assignedAddresses'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.assignedAddresses = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'bridge'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.bridge = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'broadcastEnabled'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.broadcastEnabled = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'dhcp'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.dhcp = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'mac'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.mac = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'mtu'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.mtu = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'netconfRevision'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.netconfRevision = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nwid'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.nwid = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'portDeviceName'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.portDeviceName = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'portError'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.portError = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'routes'
val = data.get(property_name)
if val is not None:
datatypes = [ZerotierRoute]
try:
self.routes = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'status'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.status = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'type'
val = data.get(property_name)
if val is not None:
datatypes = [EnumZerotierType]
try:
self.type = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/Zerotier.py | Zerotier.py |
from enum import Enum
class EnumBridgeStatus(Enum):
up = "up"
down = "down"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumBridgeStatus.py | EnumBridgeStatus.py |
"""
Auto-generated class for Graph
"""
from . import client_support
class Graph(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(id, url):
"""
:type id: str
:type url: str
:rtype: Graph
"""
return Graph(
id=id,
url=url,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'Graph'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'id'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.id = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'url'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.url = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/Graph.py | Graph.py |
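One subtlety of the generated constructors is the line data = json or kwargs: whenever a non-empty json dict is passed, keyword arguments are silently ignored (and an empty dict falls through to kwargs). The two-field Graph class makes this easy to see:

from zeroos.orchestrator.client import Graph

g = Graph(json={"id": "g1", "url": "http://example.com/g1"})
assert g.url == "http://example.com/g1"

# kwargs next to a non-empty json dict are ignored, not merged:
g2 = Graph(json={"id": "g1", "url": "http://example.com/g1"}, url="other")
assert g2.url == "http://example.com/g1"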
from enum import Enum
class EnumVdiskListItemStatus(Enum):
running = "running"
halted = "halted"
rollingback = "rollingback"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumVdiskListItemStatus.py | EnumVdiskListItemStatus.py |
import requests
from .Bridge import Bridge
from .BridgeCreate import BridgeCreate
from .BridgeCreateSetting import BridgeCreateSetting
from .CPUInfo import CPUInfo
from .CPUStats import CPUStats
from .CloudInit import CloudInit
from .Cluster import Cluster
from .ClusterCreate import ClusterCreate
from .Container import Container
from .ContainerListItem import ContainerListItem
from .ContainerNIC import ContainerNIC
from .ContainerNICconfig import ContainerNICconfig
from .CoreStateResult import CoreStateResult
from .CoreSystem import CoreSystem
from .CreateContainer import CreateContainer
from .CreateSnapshotReqBody import CreateSnapshotReqBody
from .DHCP import DHCP
from .Dashboard import Dashboard
from .DashboardListItem import DashboardListItem
from .DeleteFile import DeleteFile
from .DiskInfo import DiskInfo
from .DiskPartition import DiskPartition
from .EnumBridgeCreateNetworkMode import EnumBridgeCreateNetworkMode
from .EnumBridgeStatus import EnumBridgeStatus
from .EnumClusterCreateClusterType import EnumClusterCreateClusterType
from .EnumClusterCreateDriveType import EnumClusterCreateDriveType
from .EnumClusterDriveType import EnumClusterDriveType
from .EnumClusterStatus import EnumClusterStatus
from .EnumContainerListItemStatus import EnumContainerListItemStatus
from .EnumContainerNICStatus import EnumContainerNICStatus
from .EnumContainerNICType import EnumContainerNICType
from .EnumContainerStatus import EnumContainerStatus
from .EnumDiskInfoType import EnumDiskInfoType
from .EnumGWNICType import EnumGWNICType
from .EnumGetGWStatus import EnumGetGWStatus
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from .EnumNicLinkType import EnumNicLinkType
from .EnumNodeStatus import EnumNodeStatus
from .EnumStoragePoolCreateDataProfile import EnumStoragePoolCreateDataProfile
from .EnumStoragePoolCreateMetadataProfile import EnumStoragePoolCreateMetadataProfile
from .EnumStoragePoolDataProfile import EnumStoragePoolDataProfile
from .EnumStoragePoolDeviceStatus import EnumStoragePoolDeviceStatus
from .EnumStoragePoolListItemStatus import EnumStoragePoolListItemStatus
from .EnumStoragePoolMetadataProfile import EnumStoragePoolMetadataProfile
from .EnumStoragePoolStatus import EnumStoragePoolStatus
from .EnumStorageServerStatus import EnumStorageServerStatus
from .EnumVMListItemStatus import EnumVMListItemStatus
from .EnumVMStatus import EnumVMStatus
from .EnumVdiskCreateType import EnumVdiskCreateType
from .EnumVdiskListItemStatus import EnumVdiskListItemStatus
from .EnumVdiskListItemType import EnumVdiskListItemType
from .EnumVdiskStatus import EnumVdiskStatus
from .EnumVdiskType import EnumVdiskType
from .EnumZerotierListItemType import EnumZerotierListItemType
from .EnumZerotierType import EnumZerotierType
from .Filesystem import Filesystem
from .FilesystemCreate import FilesystemCreate
from .GW import GW
from .GWCreate import GWCreate
from .GWHost import GWHost
from .GWNIC import GWNIC
from .GWNICconfig import GWNICconfig
from .GetGW import GetGW
from .Graph import Graph
from .HTTPProxy import HTTPProxy
from .HTTPType import HTTPType
from .HealthCheck import HealthCheck
from .IPProtocol import IPProtocol
from .Job import Job
from .JobListItem import JobListItem
from .JobResult import JobResult
from .ListGW import ListGW
from .MemInfo import MemInfo
from .NicInfo import NicInfo
from .NicLink import NicLink
from .Node import Node
from .NodeHealthCheck import NodeHealthCheck
from .NodeMount import NodeMount
from .OSInfo import OSInfo
from .PortForward import PortForward
from .Process import Process
from .ProcessSignal import ProcessSignal
from .Snapshot import Snapshot
from .StoragePool import StoragePool
from .StoragePoolCreate import StoragePoolCreate
from .StoragePoolDevice import StoragePoolDevice
from .StoragePoolListItem import StoragePoolListItem
from .StorageServer import StorageServer
from .VDiskLink import VDiskLink
from .VM import VM
from .VMCreate import VMCreate
from .VMDiskInfo import VMDiskInfo
from .VMInfo import VMInfo
from .VMListItem import VMListItem
from .VMMigrate import VMMigrate
from .VMNicInfo import VMNicInfo
from .VMUpdate import VMUpdate
from .Vdisk import Vdisk
from .VdiskCreate import VdiskCreate
from .VdiskListItem import VdiskListItem
from .VdiskResize import VdiskResize
from .VdiskRollback import VdiskRollback
from .WriteFile import WriteFile
from .Zerotier import Zerotier
from .ZerotierBridge import ZerotierBridge
from .ZerotierJoin import ZerotierJoin
from .ZerotierListItem import ZerotierListItem
from .ZerotierRoute import ZerotierRoute
from .client import Client as APIClient
from .oauth2_client_itsyouonline import Oauth2ClientItsyouonline
class Client:
def __init__(self, base_uri=""):
self.api = APIClient(base_uri)
self.oauth2_client_itsyouonline = Oauth2ClientItsyouonline() | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/__init__.py | __init__.py |
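The package-level Client ties the generated REST client to the itsyouonline OAuth2 helper. A minimal connection sketch; the URL is illustrative:

from zeroos.orchestrator.client import Client

cl = Client(base_uri="http://orchestrator.example.com:8080/api")
# REST calls are made through cl.api; token handling lives on
# cl.oauth2_client_itsyouonline.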
"""
Auto-generated class for BridgeCreate
"""
from .BridgeCreateSetting import BridgeCreateSetting
from .EnumBridgeCreateNetworkMode import EnumBridgeCreateNetworkMode
from . import client_support
class BridgeCreate(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(name, nat, networkMode, setting, hwaddr=None):
"""
:type hwaddr: str
:type name: str
:type nat: bool
:type networkMode: EnumBridgeCreateNetworkMode
:type setting: BridgeCreateSetting
:rtype: BridgeCreate
"""
return BridgeCreate(
hwaddr=hwaddr,
name=name,
nat=nat,
networkMode=networkMode,
setting=setting,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'BridgeCreate'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'hwaddr'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.hwaddr = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nat'
val = data.get(property_name)
if val is not None:
datatypes = [bool]
try:
self.nat = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'networkMode'
val = data.get(property_name)
if val is not None:
datatypes = [EnumBridgeCreateNetworkMode]
try:
self.networkMode = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'setting'
val = data.get(property_name)
if val is not None:
datatypes = [BridgeCreateSetting]
try:
self.setting = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/BridgeCreate.py | BridgeCreate.py |
"""
Auto-generated class for CPUInfo
"""
from . import client_support
class CPUInfo(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(cacheSize, cores, family, flags, mhz):
"""
:type cacheSize: int
:type cores: int
:type family: str
:type flags: list[str]
:type mhz: float
:rtype: CPUInfo
"""
return CPUInfo(
cacheSize=cacheSize,
cores=cores,
family=family,
flags=flags,
mhz=mhz,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'CPUInfo'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'cacheSize'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.cacheSize = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'cores'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.cores = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'family'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.family = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'flags'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.flags = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'mhz'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.mhz = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/CPUInfo.py | CPUInfo.py |
"""
Auto-generated class for ZerotierRoute
"""
from . import client_support
class ZerotierRoute(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(flags, metric, target, via):
"""
:type flags: int
:type metric: int
:type target: str
:type via: str
:rtype: ZerotierRoute
"""
return ZerotierRoute(
flags=flags,
metric=metric,
target=target,
via=via,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'ZerotierRoute'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'flags'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.flags = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'metric'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.metric = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'target'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.target = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'via'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.via = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/ZerotierRoute.py | ZerotierRoute.py |
from enum import Enum
class EnumStoragePoolListItemStatus(Enum):
healthy = "healthy"
degraded = "degraded"
error = "error"
unknown = "unknown"
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/EnumStoragePoolListItemStatus.py | EnumStoragePoolListItemStatus.py |
"""
Auto-generated class for StoragePoolListItem
"""
from .EnumStoragePoolListItemStatus import EnumStoragePoolListItemStatus
from . import client_support
class StoragePoolListItem(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(capacity, name, status):
"""
:type capacity: str
:type name: str
:type status: EnumStoragePoolListItemStatus
:rtype: StoragePoolListItem
"""
return StoragePoolListItem(
capacity=capacity,
name=name,
status=status,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'StoragePoolListItem'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'capacity'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.capacity = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'status'
val = data.get(property_name)
if val is not None:
datatypes = [EnumStoragePoolListItemStatus]
try:
self.status = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/StoragePoolListItem.py | StoragePoolListItem.py |
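Enum-typed fields expect the enum member itself, since val_factory is handed the enum class as the accepted datatype. A sketch with illustrative values (whether a raw string such as "healthy" would also be coerced depends on client_support, not shown):

from zeroos.orchestrator.client import (
    EnumStoragePoolListItemStatus,
    StoragePoolListItem,
)

pool = StoragePoolListItem.create(
    capacity="930GiB",
    name="pool-ssd",
    status=EnumStoragePoolListItemStatus.healthy,
)
print(pool.as_json(indent=4))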
"""
Auto-generated class for CPUStats
"""
from . import client_support
class CPUStats(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(guestNice, idle, ioWait, irq, nice, softIrq, steal, stolen, system, user):
"""
:type guestNice: float
:type idle: float
:type ioWait: float
:type irq: float
:type nice: float
:type softIrq: float
:type steal: float
:type stolen: float
:type system: float
:type user: float
:rtype: CPUStats
"""
return CPUStats(
guestNice=guestNice,
idle=idle,
ioWait=ioWait,
irq=irq,
nice=nice,
softIrq=softIrq,
steal=steal,
stolen=stolen,
system=system,
user=user,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'CPUStats'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'guestNice'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.guestNice = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'idle'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.idle = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'ioWait'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.ioWait = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'irq'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.irq = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nice'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.nice = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'softIrq'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.softIrq = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'steal'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.steal = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'stolen'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.stolen = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'system'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.system = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'user'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.user = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/CPUStats.py | CPUStats.py |
"""
Auto-generated class for CoreStateResult
"""
from . import client_support
class CoreStateResult(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(cpu, rss, swap, vms):
"""
:type cpu: float
:type rss: int
:type swap: int
:type vms: int
:rtype: CoreStateResult
"""
return CoreStateResult(
cpu=cpu,
rss=rss,
swap=swap,
vms=vms,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'CoreStateResult'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'cpu'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.cpu = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'rss'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.rss = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'swap'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.swap = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'vms'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.vms = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/CoreStateResult.py | CoreStateResult.py |
"""
Auto-generated class for NodeMount
"""
from . import client_support
class NodeMount(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(device, fstype, mountpoint, opts):
"""
:type device: str
:type fstype: str
:type mountpoint: str
:type opts: str
:rtype: NodeMount
"""
return NodeMount(
device=device,
fstype=fstype,
mountpoint=mountpoint,
opts=opts,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'NodeMount'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'device'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.device = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'fstype'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.fstype = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'mountpoint'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.mountpoint = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'opts'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.opts = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/NodeMount.py | NodeMount.py |
"""
Auto-generated class for DashboardListItem
"""
from . import client_support
class DashboardListItem(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(dashboard, name, slug, url):
"""
:type dashboard: str
:type name: str
:type slug: str
:type url: str
:rtype: DashboardListItem
"""
return DashboardListItem(
dashboard=dashboard,
name=name,
slug=slug,
url=url,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'DashboardListItem'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'dashboard'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.dashboard = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'slug'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.slug = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'url'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.url = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/DashboardListItem.py | DashboardListItem.py |
"""
Auto-generated class for ListGW
"""
from .GWNIC import GWNIC
from .HTTPProxy import HTTPProxy
from .PortForward import PortForward
from . import client_support
class ListGW(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(domain, name, nics, httpproxies=None, portforwards=None, zerotiernodeid=None):
"""
:type domain: str
:type httpproxies: list[HTTPProxy]
:type name: str
:type nics: list[GWNIC]
:type portforwards: list[PortForward]
:type zerotiernodeid: str
:rtype: ListGW
"""
return ListGW(
domain=domain,
httpproxies=httpproxies,
name=name,
nics=nics,
portforwards=portforwards,
zerotiernodeid=zerotiernodeid,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'ListGW'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'domain'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.domain = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'httpproxies'
val = data.get(property_name)
if val is not None:
datatypes = [HTTPProxy]
try:
self.httpproxies = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nics'
val = data.get(property_name)
if val is not None:
datatypes = [GWNIC]
try:
self.nics = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'portforwards'
val = data.get(property_name)
if val is not None:
datatypes = [PortForward]
try:
self.portforwards = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
property_name = 'zerotiernodeid'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.zerotiernodeid = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/ListGW.py | ListGW.py |
"""
Auto-generated class for VMInfo
"""
from .VMDiskInfo import VMDiskInfo
from .VMNicInfo import VMNicInfo
from . import client_support
class VMInfo(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(cpu, disks, nics):
"""
:type cpu: list[float]
:type disks: list[VMDiskInfo]
:type nics: list[VMNicInfo]
:rtype: VMInfo
"""
return VMInfo(
cpu=cpu,
disks=disks,
nics=nics,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'VMInfo'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'cpu'
val = data.get(property_name)
if val is not None:
datatypes = [float]
try:
self.cpu = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'disks'
val = data.get(property_name)
if val is not None:
datatypes = [VMDiskInfo]
try:
self.disks = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'nics'
val = data.get(property_name)
if val is not None:
datatypes = [VMNicInfo]
try:
self.nics = client_support.list_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/client/VMInfo.py | VMInfo.py |
from enum import Enum
from .Partition import Partition
from .abstracts import Mountable
class DiskType(Enum):
ssd = "ssd"
hdd = "hdd"
nvme = "nvme"
archive = "archive"
cdrom = 'cdrom'
class Disks:
"""Subobject to list disks"""
def __init__(self, node):
self.node = node
self._client = node.client
def list(self):
"""
List of disks on the node
"""
disks = []
disk_list = self._client.disk.list()
if 'blockdevices' in disk_list:
            for disk_info in disk_list['blockdevices']:
disks.append(Disk(
node=self.node,
disk_info=disk_info
))
return disks
def get(self, name):
"""
return the disk called `name`
@param name: name of the disk
"""
for disk in self.list():
if disk.name == name:
return disk
return None
class Disk(Mountable):
"""Disk in a G8OS"""
def __init__(self, node, disk_info):
"""
disk_info: dict returned by client.disk.list()
"""
# g8os client to talk to the node
self.node = node
self._client = node.client
self.name = None
self.size = None
self.blocksize = None
self.partition_table = None
self.mountpoint = None
self.model = None
self._filesystems = []
self.type = None
self.partitions = []
self._load(disk_info)
@property
def devicename(self):
return "/dev/{}".format(self.name)
@property
def filesystems(self):
self._populate_filesystems()
return self._filesystems
def _load(self, disk_info):
self.name = disk_info['name']
detail = self._client.disk.getinfo(self.name)
self.size = int(disk_info['size'])
self.blocksize = detail['blocksize']
if detail['table'] != 'unknown':
self.partition_table = detail['table']
self.mountpoint = disk_info['mountpoint']
self.model = disk_info['model']
self.type = self._disk_type(disk_info)
for partition_info in disk_info.get('children', []) or []:
self.partitions.append(
Partition(
disk=self,
part_info=partition_info)
)
def _populate_filesystems(self):
"""
look into all the btrfs filesystem and populate
the filesystems attribute of the class with the detail of
all the filesystem present on the disk
"""
self._filesystems = []
for fs in (self._client.btrfs.list() or []):
for device in fs['devices']:
if device['path'] == "/dev/{}".format(self.name):
self._filesystems.append(fs)
break
def _disk_type(self, disk_info):
"""
return the type of the disk
"""
if disk_info['rota'] == "1":
if disk_info['type'] == 'rom':
return DiskType.cdrom
            # assume that if a disk is more than 7TB it's an SMR disk
elif int(disk_info['size']) > (1024 * 1024 * 1024 * 1024 * 7):
return DiskType.archive
else:
return DiskType.hdd
else:
if "nvme" in disk_info['name']:
return DiskType.nvme
else:
return DiskType.ssd
def mktable(self, table_type='gpt', overwrite=False):
"""
create a partition table on the disk
@param table_type: Partition table type as accepted by parted
@param overwrite: erase any existing partition table
"""
if self.partition_table is not None and overwrite is False:
return
self._client.disk.mktable(
disk=self.name,
table_type=table_type
)
def mkpart(self, start, end, part_type="primary"):
"""
@param start: partition start as accepted by parted mkpart
@param end: partition end as accepted by parted mkpart
@param part_type: partition type as accepted by parted mkpart
"""
before = {p.name for p in self.partitions}
self._client.disk.mkpart(
self.name,
start=start,
end=end,
part_type=part_type,
)
after = {}
for disk in self._client.disk.list()['blockdevices']:
if disk['name'] != self.name:
continue
for part in disk.get('children', []):
after[part['name']] = part
name = set(after.keys()) - before
part_info = after[list(name)[0]]
partition = Partition(
disk=self,
part_info=part_info)
self.partitions.append(partition)
return partition
def __str__(self):
return "Disk <{}>".format(self.name)
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.devicename == other.devicename
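if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself. The node address,
    # password and disk name below are placeholders for a real environment.
    from zeroos.orchestrator.sal.Node import Node
    node = Node('192.168.1.10', password='admin')
    disk = node.disks.get('sdb')
    if disk is not None and disk.partition_table is None:
        disk.mktable('gpt')               # create an empty GPT partition table
        part = disk.mkpart('1m', '100%')  # one primary partition spanning the disk
        print(part.devicename, part.size)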
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Disk.py | Disk.py |
from .abstracts import Mountable
import os
import time
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _prepare_device(node, devicename):
logger.debug("prepare device %s", devicename)
ss = devicename.split('/')
if len(ss) < 3:
raise RuntimeError("bad device name: {}".format(devicename))
name = ss[2]
disk = node.disks.get(name)
if disk is None:
raise ValueError("device {} not found".format(name))
node.client.system('parted -s /dev/{} mklabel gpt mkpart primary 1m 100%'.format(name)).get()
now = time.time()
# check partitions is ready and writable
while now + 60 > time.time():
        try:
            disk = node.disks.get(name)
            if len(disk.partitions) > 0:
                partition = disk.partitions[0]
                resp = node.client.bash('test -b {0} && dd if={0} of=/dev/null bs=4k count=1024'.format(partition.devicename)).get()
                if resp.state == 'SUCCESS':
                    return partition
        except Exception:
            pass
        time.sleep(1)
else:
raise RuntimeError("Failed to create partition")
class StoragePools:
def __init__(self, node):
self.node = node
self._client = node._client
def list(self):
storagepools = []
btrfs_list = self._client.btrfs.list()
if btrfs_list:
            for btrfs in btrfs_list:
if btrfs['label'].startswith('sp_'):
name = btrfs['label'].split('_', 1)[1]
devicenames = [device['path'] for device in btrfs['devices']]
storagepools.append(StoragePool(self.node, name, devicenames))
return storagepools
def get(self, name):
for pool in self.list():
if pool.name == name:
return pool
raise ValueError("Could not find StoragePool with name {}".format(name))
def create(self, name, devices, metadata_profile, data_profile, overwrite=False):
label = 'sp_{}'.format(name)
logger.debug("create storagepool %s", label)
device_names = []
for device in devices:
part = _prepare_device(self.node, device)
device_names.append(part.devicename)
self._client.btrfs.create(label, device_names, metadata_profile, data_profile, overwrite=overwrite)
pool = StoragePool(self.node, name, device_names)
return pool
class StoragePool(Mountable):
def __init__(self, node, name, devices):
self.node = node
self._client = node._client
self.devices = devices
self.name = name
self._mountpoint = None
self._ays = None
@property
def devicename(self):
return 'UUID={}'.format(self.uuid)
def mount(self, target=None):
if target is None:
target = os.path.join('/mnt/storagepools/{}'.format(self.name))
return super().mount(target)
def delete(self, zero=True):
"""
        Destroy the storage pool
        @param zero: write zeros to the first 500MB of each disk in this storagepool
"""
if self.mountpoint:
self.umount()
partitionmap = {}
for disk in self.node.disks.list():
for partition in disk.partitions:
partitionmap[partition.name] = partition
for device in self.devices:
diskpath = os.path.basename(device)
partition = partitionmap.get(diskpath)
if partition:
disk = partition.disk
self._client.disk.rmpart(disk.name, 1)
if zero:
self._client.bash('test -b /dev/{0} && dd if=/dev/zero bs=1M count=500 of=/dev/{0}'.format(diskpath)).get()
return True
return False
@property
def mountpoint(self):
mounts = self.node.list_mounts()
for device in self.devices:
for mount in mounts:
if mount.device == device:
options = mount.options.split(',')
if 'subvol=/' in options:
return mount.mountpoint
def is_device_used(self, device):
"""
check if the device passed as argument is already part of this storagepool
@param device: str e.g: /dev/sda
"""
for d in self.devices:
if d.startswith(device):
return True
return False
def device_add(self, *devices):
to_add = []
for device in devices:
if self.is_device_used(device):
continue
part = _prepare_device(self.node, device)
logger.debug("add device %s to %s", device, self)
to_add.append(part.devicename)
self._client.btrfs.device_add(self._get_mountpoint(), *to_add)
self.devices.extend(to_add)
def device_remove(self, *devices):
self._client.btrfs.device_remove(self._get_mountpoint(), *devices)
for device in devices:
if device in self.devices:
                logger.debug("remove device %s from %s", device, self)
self.devices.remove(device)
@property
def fsinfo(self):
if self.mountpoint is None:
raise ValueError("can't get fsinfo if storagepool is not mounted")
return self._client.btrfs.info(self.mountpoint)
@mountpoint.setter
def mountpoint(self, value):
# do not do anything mountpoint is dynamic
return
def _get_mountpoint(self):
mountpoint = self.mountpoint
if not mountpoint:
raise RuntimeError("Can not perform action when filesystem is not mounted")
return mountpoint
@property
def info(self):
for fs in self._client.btrfs.list():
if fs['label'] == 'sp_{}'.format(self.name):
return fs
return None
def raw_list(self):
mountpoint = self._get_mountpoint()
return self._client.btrfs.subvol_list(mountpoint) or []
def get_devices_and_status(self):
device_map = []
disks = self._client.disk.list()['blockdevices']
pool_status = 'healthy'
for device in self.devices:
info = None
for disk in disks:
disk_name = "/dev/%s" % disk['kname']
if device == disk_name and disk['mountpoint']:
info = disk
break
for part in disk.get('children', []) or []:
if device == "/dev/%s" % part['kname']:
info = part
break
if info:
break
            status = 'healthy'
            if info and info['subsystems'] != 'block:virtio:pci':
                result = self._client.bash("smartctl -H %s > /dev/null ;echo $?" % disk_name).get()
exit_status = int(result.stdout)
if exit_status & 1 << 0:
status = "unknown"
pool_status = 'degraded'
if (exit_status & 1 << 2) or (exit_status & 1 << 3):
status = 'degraded'
pool_status = 'degraded'
device_map.append({
'device': device,
'partUUID': info['partuuid'] or '' if info else '',
'status': status,
})
return device_map, pool_status
def list(self):
subvolumes = []
for subvol in self.raw_list():
path = subvol['Path']
type_, _, name = path.partition('/')
if type_ == 'filesystems':
subvolumes.append(FileSystem(name, self))
return subvolumes
def get(self, name):
"""
Get Filesystem
"""
for filesystem in self.list():
if filesystem.name == name:
return filesystem
raise ValueError("Could not find filesystem with name {}".format(name))
def exists(self, name):
"""
Check if filesystem with name exists
"""
for subvolume in self.list():
if subvolume.name == name:
return True
return False
def create(self, name, quota=None):
"""
Create filesystem
"""
logger.debug("Create filesystem %s on %s", name, self)
mountpoint = self._get_mountpoint()
fspath = os.path.join(mountpoint, 'filesystems')
self._client.filesystem.mkdir(fspath)
subvolpath = os.path.join(fspath, name)
self._client.btrfs.subvol_create(subvolpath)
if quota:
pass
return FileSystem(name, self)
@property
def size(self):
total = 0
fs = self.info
if fs:
for device in fs['devices']:
total += device['size']
return total
@property
def uuid(self):
fs = self.info
if fs:
return fs['uuid']
return None
@property
def used(self):
total = 0
fs = self.info
if fs:
for device in fs['devices']:
total += device['used']
return total
@property
def ays(self):
if self._ays is None:
from zeroos.orchestrator.sal.atyourservice.StoragePool import StoragePoolAys
self._ays = StoragePoolAys(self)
return self._ays
def __repr__(self):
return "StoragePool <{}>".format(self.name)
class FileSystem:
def __init__(self, name, pool):
self.name = name
self.pool = pool
self._client = pool.node.client
self.subvolume = "filesystems/{}".format(name)
self.path = os.path.join(self.pool.mountpoint, self.subvolume)
self.snapshotspath = os.path.join(self.pool.mountpoint, 'snapshots', self.name)
self._ays = None
def delete(self, includesnapshots=True):
"""
Delete filesystem
"""
paths = [fs['Path'] for fs in self._client.btrfs.subvol_list(self.path)]
paths.sort(reverse=True)
for path in paths:
rpath = os.path.join(self.path, os.path.relpath(path, self.subvolume))
self._client.btrfs.subvol_delete(rpath)
self._client.btrfs.subvol_delete(self.path)
if includesnapshots:
for snapshot in self.list():
snapshot.delete()
self._client.filesystem.remove(self.snapshotspath)
def get(self, name):
"""
Get snapshot
"""
for snap in self.list():
if snap.name == name:
return snap
raise ValueError("Could not find snapshot {}".format(name))
def list(self):
"""
List snapshots
"""
snapshots = []
if self._client.filesystem.exists(self.snapshotspath):
for fileentry in self._client.filesystem.list(self.snapshotspath):
if fileentry['is_dir']:
snapshots.append(Snapshot(fileentry['name'], self))
return snapshots
def exists(self, name):
"""
Check if a snapshot exists
"""
        return any(snapshot.name == name for snapshot in self.list())
def create(self, name):
"""
Create snapshot
"""
logger.debug("create snapshot %s on %s", name, self.pool)
snapshot = Snapshot(name, self)
if self.exists(name):
            raise RuntimeError("Snapshot path {} exists.".format(snapshot.path))
self._client.filesystem.mkdir(self.snapshotspath)
self._client.btrfs.subvol_snapshot(self.path, snapshot.path)
return snapshot
@property
def ays(self):
if self._ays is None:
from JumpScale.sal.g8os.atyourservice.StoragePool import FileSystemAys
self._ays = FileSystemAys(self)
return self._ays
def __repr__(self):
return "FileSystem <{}: {!r}>".format(self.name, self.pool)
class Snapshot:
def __init__(self, name, filesystem):
self.filesystem = filesystem
self._client = filesystem.pool.node.client
self.name = name
self.path = os.path.join(self.filesystem.snapshotspath, name)
self.subvolume = "snapshots/{}/{}".format(self.filesystem.name, name)
def rollback(self):
self.filesystem.delete(False)
self._client.btrfs.subvol_snapshot(self.path, self.filesystem.path)
def delete(self):
self._client.btrfs.subvol_delete(self.path)
def __repr__(self):
return "Snapshot <{}: {!r}>".format(self.name, self.filesystem)
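if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself. The node address,
    # password and device name are placeholders; note that creating a
    # storagepool repartitions the given devices.
    from zeroos.orchestrator.sal.Node import Node
    node = Node('192.168.1.10', password='admin')
    pool = node.storagepools.create('demo', devices=['/dev/sdb'],
                                    metadata_profile='single', data_profile='single')
    pool.mount()                        # mounts under /mnt/storagepools/demo
    fs = pool.create('data')            # btrfs subvolume filesystems/data
    snap = fs.create('before-upgrade')  # snapshot of that subvolume
    print(pool.list(), fs.list(), snap.path)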
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/StoragePool.py | StoragePool.py |
from zerotier.client import Client
import netaddr
class ZTBootstrap:
def __init__(self, token, bootstap_id, grid_id, cidr):
self.bootstap_nwid = bootstap_id
self.grid_nwid = grid_id
self._cidr = cidr # TODO validate format
# create client and set the authentication header
self._zt = Client()
self._zt.set_auth_header("Bearer " + token)
def configure_routes(self):
for nwid in [self.bootstap_nwid, self.grid_nwid]:
resp = self._zt.network.getNetwork(nwid)
resp.raise_for_status()
nw = resp.json()
nw['config']['routes'] = [{'target': self._cidr, 'via': None}]
self._zt.network.updateNetwork(nw, nwid).raise_for_status()
def list_join_request(self):
"""
        return a list of members that try to access the bootstrap network
"""
resp = self._zt.network.listMembers(id=self.bootstap_nwid)
resp.raise_for_status()
requests = []
for member in resp.json():
if not member['online'] or member['config']['authorized']:
continue
requests.append(member)
return requests
def assign_ip(self, nwid, member, ip=None):
"""
        Assign an IP address to a member of a given network
        @nwid : id of the network
        @member : member object
        @ip: ip address to assign to the member; if None take the next free IP in the range
"""
if ip is None:
ip = self._find_free_ip(nwid)
member['config']['authorized'] = True
member['config']['ipAssignments'] = [ip]
resp = self._zt.network.updateMember(member, member['nodeId'], nwid)
resp.raise_for_status()
return ip
def unauthorize_member(self, nwid, member):
member['config']['authorized'] = False
member['config']['ipAssignments'] = []
resp = self._zt.network.updateMember(member, member['nodeId'], nwid)
resp.raise_for_status()
def _find_free_ip(self, nwid):
resp = self._zt.network.listMembers(nwid)
resp.raise_for_status()
all_ips = list(netaddr.IPNetwork(self._cidr))
for member in resp.json():
for addr in member['config']['ipAssignments']:
all_ips.remove(netaddr.IPAddress(addr))
if len(all_ips) <= 0:
raise RuntimeError("No more free ip in the range %s" % self._cidr)
return str(all_ips[0])
if __name__ == '__main__':
token = '4gE9Cfqw2vFFzCPC1BYaj2mbSpNScxJx'
bootstap_nwid = '17d709436c993670'
grid_nwid = 'a09acf02336ce8b5'
zt = ZTBootstrap(token, bootstap_nwid, grid_nwid, '192.168.10.0/24')
from IPython import embed; embed()
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/ZerotierBootstrap.py | ZerotierBootstrap.py |
from js9 import j
class Mountable:
"""
Abstract implementation for devices that are mountable.
Device should have attributes devicename and mountpoint
"""
def mount(self, target, options=['defaults']):
"""
@param target: Mount point
@param options: Optional mount options
"""
if self.mountpoint == target:
return
self._client.bash('mkdir -p {}'.format(target))
self._client.disk.mount(
source=self.devicename,
target=target,
options=options,
)
self.mountpoint = target
def umount(self):
"""
Unmount disk
"""
if self.mountpoint:
self._client.disk.umount(
source=self.mountpoint,
)
self.mountpoint = None
class AYSable:
"""
    Abstract implementation for classes that reflect an AYS service.
    The class should have name and actor attributes.
    This provides common methods to CRUD AYS services from the python classes.
"""
@property
def name(self):
return self._obj.name
@property
def role(self):
return self.actor.split('.')[0]
def create(self, aysrepo):
"""
create the AYS Service
"""
raise NotImplementedError()
def get(self, aysrepo):
"""
get the AYS service
"""
try:
return aysrepo.serviceGet(role=self.role, instance=self.name)
except j.exceptions.NotFound:
raise ValueError("Could not find {} with name {}".format(self.role, self.name))
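# Minimal sketch of how Mountable is meant to be used (illustrative only):
# a subclass needs `devicename` and `mountpoint` attributes plus a `_client`
# handle to a node, and then inherits mount()/umount().
#
#     class LoopDevice(Mountable):
#         def __init__(self, node, devicename):
#             self._client = node.client
#             self.devicename = devicename   # e.g. '/dev/loop0' (placeholder)
#             self.mountpoint = None
#
#     LoopDevice(node, '/dev/loop0').mount('/mnt/demo')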
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/abstracts.py | abstracts.py |
from .abstracts import Mountable
class Partition(Mountable):
"""Partition of a disk in a G8OS"""
def __init__(self, disk, part_info):
"""
part_info: dict returned by client.disk.list()
"""
# g8os client to talk to the node
self.disk = disk
self._client = disk.node.client
self.name = None
self.size = None
self.blocksize = None
self.mountpoint = None
self.uuid = None
self._filesystems = []
self._load(part_info)
@property
def filesystems(self):
self._populate_filesystems()
return self._filesystems
@property
def devicename(self):
return "/dev/{}".format(self.name)
def _load(self, part_info):
self.name = part_info['name']
self.size = int(part_info['size'])
self.blocksize = self.disk.blocksize
self.mountpoint = part_info['mountpoint']
self.uuid = part_info['partuuid']
def _populate_filesystems(self):
"""
look into all the btrfs filesystem and populate
the filesystems attribute of the class with the detail of
all the filesystem present on the disk
"""
self._filesystems = []
for fs in (self._client.btrfs.list() or []):
for device in fs['devices']:
if device['path'] == "/dev/{}".format(self.name):
self._filesystems.append(fs)
break
def __str__(self):
return "Partition <{}>".format(self.name)
def __repr__(self):
return str(self)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Partition.py | Partition.py |
import json
from io import BytesIO
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Containers:
def __init__(self, node):
self.node = node
def list(self):
containers = []
for container in self.node.client.container.list().values():
try:
containers.append(Container.from_containerinfo(container, self.node))
except ValueError:
                # skip containers without tags
pass
return containers
def get(self, name):
containers = list(self.node.client.container.find(name).values())
if not containers:
raise LookupError("Could not find container with name {}".format(name))
if len(containers) > 1:
            raise LookupError("Found more than one container with name {}".format(name))
return Container.from_containerinfo(containers[0], self.node)
def create(self, name, flist, hostname=None, mounts=None, nics=None,
host_network=False, ports=None, storage=None, init_processes=None, privileged=False):
logger.debug("create container %s", name)
container = Container(name, self.node, flist, hostname, mounts, nics,
host_network, ports, storage, init_processes, privileged)
container.start()
return container
class Container:
    """G8OS Container"""
def __init__(self, name, node, flist, hostname=None, mounts=None, nics=None,
host_network=False, ports=None, storage=None, init_processes=None,
privileged=False, identity=None):
"""
        G8OS container
        mounts: dict mapping a node filesystem path to its target path inside the container
"""
self.name = name
self.node = node
self.mounts = mounts or {}
self.hostname = hostname
self.flist = flist
self.ports = ports or {}
self.nics = nics or []
self.host_network = host_network
self.storage = storage
self.init_processes = init_processes or []
self._client = None
self.privileged = privileged
self.identity = identity
self._ays = None
for nic in self.nics:
nic.pop('token', None)
if nic.get('config', {}).get('gateway', ''):
nic['monitor'] = True
@classmethod
def from_containerinfo(cls, containerinfo, node):
logger.debug("create container from info")
arguments = containerinfo['container']['arguments']
if not arguments['tags']:
# we don't deal with tagless containers
            raise ValueError("Could not load containerinfo without tags")
        return cls(arguments['tags'][0],
                   node,
                   arguments['root'],
                   arguments['hostname'],
                   arguments['mount'],
                   arguments['nics'],
                   arguments['host_network'],
                   arguments['port'],
                   arguments['storage'],
                   privileged=arguments['privileged'],
                   identity=arguments['identity'])
@classmethod
def from_ays(cls, service, password=None):
logger.debug("create container from service (%s)", service)
from .Node import Node
node = Node.from_ays(service.parent, password)
ports = {}
for portmap in service.model.data.ports:
source, dest = portmap.split(':')
ports[int(source)] = int(dest)
nics = [nic.to_dict() for nic in service.model.data.nics]
mounts = {}
for mount in service.model.data.mounts:
fs_service = service.aysrepo.serviceGet('filesystem', mount.filesystem)
try:
sp = node.storagepools.get(fs_service.parent.name)
fs = sp.get(fs_service.name)
            except (KeyError, ValueError):
continue
mounts[fs.path] = mount.target
container = cls(
name=service.name,
node=node,
mounts=mounts,
nics=nics,
hostname=service.model.data.hostname,
flist=service.model.data.flist,
ports=ports,
host_network=service.model.data.hostNetworking,
storage=service.model.data.storage,
init_processes=[p.to_dict() for p in service.model.data.initProcesses],
privileged=service.model.data.privileged,
identity=service.model.data.identity,
)
return container
@property
def id(self):
logger.debug("get container id")
info = self.info
if info:
return info['container']['id']
return
@property
def info(self):
logger.debug("get container info")
for containerid, container in self.node.client.container.list().items():
if self.name in (container['container']['arguments']['tags'] or []):
container['container']['id'] = int(containerid)
return container
return
@property
def client(self):
if self._client is None:
self._client = self.node.client.container.client(self.id)
return self._client
def upload_content(self, remote, content):
if isinstance(content, str):
content = content.encode('utf8')
bytes = BytesIO(content)
self.client.filesystem.upload(remote, bytes)
def download_content(self, remote):
buff = BytesIO()
self.client.filesystem.download(remote, buff)
return buff.getvalue().decode()
def _create_container(self, timeout=60):
logger.debug("send create container command to g8os")
tags = [self.name]
if self.hostname and self.hostname != self.name:
tags.append(self.hostname)
job = self.node.client.container.create(
root_url=self.flist,
mount=self.mounts,
host_network=self.host_network,
nics=self.nics,
port=self.ports,
tags=tags,
hostname=self.hostname,
storage=self.storage,
privileged=self.privileged,
identity=self.identity,
)
containerid = job.get(timeout)
self._client = self.node.client.container.client(containerid)
def is_job_running(self, cmd):
try:
for job in self._client.job.list():
arguments = job['cmd']['arguments']
if 'name' in arguments and arguments['name'] == cmd:
return job
return False
except Exception as err:
            if "invalid container id" in str(err):
return False
raise
def is_port_listening(self, port, timeout=60):
import time
start = time.time()
while start + timeout > time.time():
if port not in self.node.freeports(port, nrports=3):
return True
time.sleep(0.2)
return False
def start(self):
if not self.is_running():
logger.debug("start %s", self)
self._create_container()
for process in self.init_processes:
cmd = "{} {}".format(process['name'], ' '.join(process.get('args', [])))
pwd = process.get('pwd', '')
stdin = process.get('stdin', '')
env = {}
for x in process.get('environment', []):
                k, v = x.split("=", 1)
env[k] = v
self.client.system(command=cmd, dir=pwd, stdin=stdin, env=env)
def stop(self):
if not self.is_running():
return
logger.debug("stop %s", self)
self.node.client.container.terminate(self.id)
self._client = None
def is_running(self):
return self.id is not None
@property
def ays(self):
if self._ays is None:
from JumpScale.sal.g8os.atyourservice.StorageCluster import ContainerAYS
self._ays = ContainerAYS(self)
return self._ays
def __str__(self):
return "Container <{}>".format(self.name)
def __repr__(self):
return str(self)
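if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself. The node address,
    # password and flist URL are placeholders.
    from zeroos.orchestrator.sal.Node import Node
    node = Node('192.168.1.10', password='admin')
    container = node.containers.create(
        name='demo',
        flist='https://hub.gig.tech/gig-official-apps/ubuntu1604.flist',
    )
    container.upload_content('/tmp/hello.txt', 'hello world')
    print(container.download_content('/tmp/hello.txt'))
    container.stop()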
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Container.py | Container.py |
import netaddr
def combine(ip1, ip2, mask):
"""
>>> combine('10.0.3.11', '192.168.1.10', 24)
    IPNetwork('10.0.3.10/24')
"""
iip1 = netaddr.IPNetwork('{}/{}'.format(ip1, mask))
iip2 = netaddr.IPNetwork('{}/{}'.format(ip2, mask))
ires = iip1.network + int(iip2.ip & (~ int(iip2.netmask)))
net = netaddr.IPNetwork(ires)
net.prefixlen = mask
return net
class Network:
def __init__(self, node):
self.node = node
self._client = node.client
def get_management_info(self):
def get_nic_ip(nics, name):
for nic in nics:
if nic['name'] == name:
for ip in nic['addrs']:
return netaddr.IPNetwork(ip['addr'])
return
defaultgwdev = self._client.bash("ip route | grep default | awk '{print $5}'").get().stdout.strip()
nics = self._client.info.nic()
mgmtaddr = None
if defaultgwdev:
ipgwdev = get_nic_ip(nics, defaultgwdev)
if ipgwdev:
mgmtaddr = str(ipgwdev.ip)
if not mgmtaddr:
mgmtaddr = self.node.addr
return mgmtaddr
def get_addresses(self, network):
mgmtaddr = self.get_management_info()
return {
'storageaddr': combine(str(network.ip), mgmtaddr, network.prefixlen),
'vxaddr': combine('10.240.0.0', mgmtaddr, network.prefixlen),
}
def get_free_nics(self):
nics = self._client.info.nic()
nics.sort(key=lambda nic: nic['speed'])
availablenics = {}
for nic in nics:
            # skip all interfaces that have an ipv4 address
if any(netaddr.IPNetwork(addr['addr']).version == 4 for addr in nic['addrs'] if 'addr' in addr):
continue
if nic['speed'] == 0:
continue
availablenics.setdefault(nic['speed'], []).append(nic['name'])
return sorted(availablenics.items())
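if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself; the node address,
    # password and the 192.168.0.0/16 range are placeholders.
    from zeroos.orchestrator.sal.Node import Node
    node = Node('192.168.1.10', password='admin')
    network = Network(node)
    print(network.get_management_info())
    print(network.get_addresses(netaddr.IPNetwork('192.168.0.0/16')))
    print(network.get_free_nics())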
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Network.py | Network.py |
from js9 import j
import os
from zeroos.core0.client.client import Timeout
import json
import hashlib
class HealthCheckObject:
def __init__(self, id, name, category, resource):
self.id = id
self.name = name
self.category = category
self._messages = []
self.resource = resource
self.stacktrace = ''
def add_message(self, id, status, text):
self._messages.append({'id': id, 'text': text, 'status': status})
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'category': self.category,
'resource': self.resource,
'stacktrace': self.stacktrace or '',
'messages': self._messages
}
class HealthCheckRun(HealthCheckObject):
def start(self, *args, **kwargs):
try:
self.run(*args, **kwargs)
except Exception as e:
eco = j.errorhandler.parsePythonExceptionObject(e)
self.stacktrace = eco.traceback
return self.to_dict()
class IPMIHealthCheck(HealthCheckRun):
def execute_ipmi(self, container, cmd):
if self.node.client.filesystem.exists("/dev/ipmi") or self.node.client.filesystem.exists("/dev/ipmi0"):
return container.client.system(cmd).get().stdout
return ''
class ContainerContext:
def __init__(self, node, flist):
self.node = node
self.flist = flist
self.container = None
self._name = 'healthcheck_{}'.format(hashlib.md5(flist.encode()).hexdigest())
def __enter__(self):
try:
self.container = self.node.containers.get(self._name)
except LookupError:
self.container = self.node.containers.create(self._name, self.flist, host_network=True, privileged=True)
return self.container
def __exit__(self, exc_type, exc_val, exc_tb):
return
class HealthCheck:
def __init__(self, node):
self.node = node
        self.healthcheckfolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'healthchecks')
def with_container(self, flist):
return ContainerContext(self.node, flist)
def run(self, container, name, timeout=None):
try:
            healthcheckfile = os.path.join(self.healthcheckfolder, name + '.py')
            if not os.path.exists(healthcheckfile):
                raise RuntimeError("Healthcheck with name {} not found".format(name))
container.client.filesystem.upload_file('/tmp/{}.py'.format(name), healthcheckfile)
try:
job = container.client.bash('python3 /tmp/{}.py'.format(name))
response = job.get(timeout)
except Timeout:
container.client.job.kill(job.id, 9)
                raise RuntimeError("Failed to execute {} in time".format(name))
if response.state == 'ERROR':
raise RuntimeError("Failed to execute {} {}".format(name, response.stdout))
try:
return json.loads(response.stdout)
except Exception:
raise RuntimeError("Failed to parse response of {}".format(name))
except Exception as e:
            healthcheck = {
                'id': name,
                'status': 'ERROR',
                'message': str(e)
            }
            return healthcheck
def cpu_mem(self):
from .healthchecks.cpu_mem_core import CPU, Memory
cpu = CPU(self.node)
memory = Memory(self.node)
return [cpu.start(), memory.start()]
def disk_usage(self):
from .healthchecks.diskusage import DiskUsage
usage = DiskUsage(self.node)
return usage.start()
def network_bond(self):
from .healthchecks.networkbond import NetworkBond
bond = NetworkBond(self.node)
return bond.start()
def node_temperature(self, container):
from .healthchecks.temperature import Temperature
temperature = Temperature(self.node)
result = temperature.start(container)
return result
def network_stability(self, nodes):
from .healthchecks.networkstability import NetworkStability
stability = NetworkStability(self.node)
return stability.start(nodes)
def rotate_logs(self):
from .healthchecks.log_rotator import RotateLogs
rotator = RotateLogs(self.node)
return rotator.start()
def openfiledescriptors(self):
from .healthchecks.openfiledescriptors import OpenFileDescriptor
ofd = OpenFileDescriptor(self.node)
return ofd.start()
def interrupts(self):
from .healthchecks.interrupts import Interrupts
inter = Interrupts(self.node)
return inter.start()
def threads(self):
from .healthchecks.threads import Threads
thread = Threads(self.node)
return thread.start()
def ssh_cleanup(self, job):
from .healthchecks.ssh_cleanup import SSHCleanup
cleaner = SSHCleanup(self.node, job)
return cleaner.start()
def powersupply(self, container):
from .healthchecks.powersupply import PowerSupply
powersupply = PowerSupply(self.node)
return powersupply.start(container)
def fan(self, container):
from .healthchecks.fan import Fan
fan = Fan(self.node)
return fan.start(container)
def context_switch(self):
from .healthchecks.context_switch import ContextSwitch
return ContextSwitch(self.node).start()
def network_load(self):
from .healthchecks.networkload import NetworkLoad
load = NetworkLoad(self.node)
return load.start()
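if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself. The node address and
    # password are placeholders, as is the flist URL used for checks that need
    # a privileged container (temperature, fan, power supply).
    from zeroos.orchestrator.sal.Node import Node
    node = Node('192.168.1.10', password='admin')
    print(node.healthcheck.cpu_mem())
    print(node.healthcheck.disk_usage())
    with node.healthcheck.with_container('https://hub.example.com/healthcheck.flist') as container:
        print(node.healthcheck.node_temperature(container))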
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthcheck.py | healthcheck.py |
from zeroos.core0.client import Client
from .Disk import Disks, DiskType
from .Container import Containers
from .StoragePool import StoragePools
from .Network import Network
from .healthcheck import HealthCheck
from collections import namedtuple
from datetime import datetime
from io import BytesIO
import netaddr
Mount = namedtuple('Mount', ['device', 'mountpoint', 'fstype', 'options'])
class Node:
"""Represent a G8OS Server"""
def __init__(self, addr, port=6379, password=None, timeout=120):
# g8os client to talk to the node
self._client = Client(host=addr, port=port, password=password, timeout=timeout)
self._storageAddr = None
self.addr = addr
self.port = port
self.disks = Disks(self)
self.storagepools = StoragePools(self)
self.containers = Containers(self)
self.network = Network(self)
self.healthcheck = HealthCheck(self)
@classmethod
def from_ays(cls, service, password=None, timeout=120):
return cls(
addr=service.model.data.redisAddr,
port=service.model.data.redisPort,
password=password,
timeout=timeout
)
@property
def client(self):
return self._client
@property
def name(self):
def get_nic_hwaddr(nics, name):
for nic in nics:
if nic['name'] == name:
return nic['hardwareaddr']
defaultgwdev = self.client.bash("ip route | grep default | awk '{print $5}'").get().stdout.strip()
nics = self.client.info.nic()
macgwdev = None
if defaultgwdev:
macgwdev = get_nic_hwaddr(nics, defaultgwdev)
if not macgwdev:
            raise AttributeError("could not find name for node {}".format(self))
return macgwdev.replace(":", '')
@property
def storageAddr(self):
if not self._storageAddr:
nic_data = self.client.info.nic()
for nic in nic_data:
if nic['name'] == 'backplane':
for ip in nic['addrs']:
network = netaddr.IPNetwork(ip['addr'])
if network.version == 4:
self._storageAddr = network.ip.format()
return self._storageAddr
self._storageAddr = self.addr
return self._storageAddr
def get_nic_by_ip(self, addr):
try:
res = next(nic for nic in self._client.info.nic() if any(addr == a['addr'].split('/')[0] for a in nic['addrs']))
return res
except StopIteration:
return None
def _eligible_fscache_disk(self, disks):
"""
return the first disk that is eligible to be used as filesystem cache
        First try to find an SSD disk, otherwise return an HDD
"""
priorities = [DiskType.ssd, DiskType.hdd, DiskType.nvme]
eligible = {t: [] for t in priorities}
# Pick up the first ssd
usedisks = []
for pool in (self._client.btrfs.list() or []):
for device in pool['devices']:
usedisks.append(device['path'])
for disk in disks[::-1]:
if disk.devicename in usedisks or len(disk.partitions) > 0:
continue
if disk.type in priorities:
eligible[disk.type].append(disk)
# pick up the first disk according to priorities
for t in priorities:
if eligible[t]:
return eligible[t][0]
else:
raise RuntimeError("cannot find eligible disks for the fs cache")
def _mount_fscache(self, storagepool):
"""
        mount the fscache storage pool and copy the content of the in-memory fs inside
"""
mountedpaths = [mount.mountpoint for mount in self.list_mounts()]
containerpath = '/var/cache/containers'
if containerpath not in mountedpaths:
if storagepool.exists('containercache'):
storagepool.get('containercache').delete()
fs = storagepool.create('containercache')
self.client.disk.mount(storagepool.devicename, containerpath, ['subvol={}'.format(fs.subvolume)])
logpath = '/var/log'
if logpath not in mountedpaths:
            # logs is an empty filesystem on which we create a snapshot to store the logs of the current boot
snapname = '{:%Y-%m-%d-%H-%M}'.format(datetime.now())
fs = storagepool.get('logs')
snapshot = fs.create(snapname)
            self.client.bash('mkdir /tmp/log && mv /var/log/* /tmp/log/').get()
self.client.disk.mount(storagepool.devicename, logpath, ['subvol={}'.format(snapshot.subvolume)])
self.client.bash('mv /tmp/log/* /var/log/').get()
self.client.logger.reopen()
# startup syslogd and klogd
self.client.system('syslogd -n -O /var/log/messages')
self.client.system('klogd -n')
def freeports(self, baseport=2000, nrports=3):
ports = self.client.info.port()
usedports = set()
for portInfo in ports:
if portInfo['network'] != "tcp":
continue
usedports.add(portInfo['port'])
freeports = []
while True:
if baseport not in usedports:
freeports.append(baseport)
if len(freeports) >= nrports:
return freeports
baseport += 1
def find_persistance(self, name='fscache'):
fscache_sp = None
for sp in self.storagepools.list():
if sp.name == name:
fscache_sp = sp
break
return fscache_sp
def ensure_persistance(self, name='fscache'):
"""
look for a disk not used,
create a partition and mount it to be used as cache for the g8ufs
set the label `fs_cache` to the partition
"""
disks = self.disks.list()
if len(disks) <= 0:
# if no disks, we can't do anything
return
# check if there is already a storage pool with the fs_cache label
fscache_sp = self.find_persistance(name)
# create the storage pool if we don't have one yet
if fscache_sp is None:
disk = self._eligible_fscache_disk(disks)
fscache_sp = self.storagepools.create(name, devices=[disk.devicename], metadata_profile='single', data_profile='single', overwrite=True)
fscache_sp.mount()
try:
fscache_sp.get('logs')
except ValueError:
fscache_sp.create('logs')
# mount the storage pool
self._mount_fscache(fscache_sp)
return fscache_sp
def download_content(self, remote):
buff = BytesIO()
self.client.filesystem.download(remote, buff)
return buff.getvalue().decode()
def upload_content(self, remote, content):
if isinstance(content, str):
content = content.encode('utf8')
bytes = BytesIO(content)
self.client.filesystem.upload(remote, bytes)
def wipedisks(self):
print('Wiping node {hostname}'.format(**self.client.info.os()))
mounteddevices = {mount['device']: mount for mount in self.client.info.disk()}
def getmountpoint(device):
for mounteddevice, mount in mounteddevices.items():
if mounteddevice.startswith(device):
return mount
jobs = []
for disk in self.client.disk.list()['blockdevices']:
devicename = '/dev/{}'.format(disk['kname'])
mount = getmountpoint(devicename)
if not mount:
print(' * Wiping disk {kname}'.format(**disk))
jobs.append(self.client.system('dd if=/dev/zero of={} bs=1M count=50'.format(devicename)))
else:
print(' * Not wiping {device} mounted at {mountpoint}'.format(device=devicename, mountpoint=mount['mountpoint']))
# wait for wiping to complete
for job in jobs:
job.get()
def list_mounts(self):
allmounts = []
for mount in self.client.info.disk():
allmounts.append(Mount(mount['device'],
mount['mountpoint'],
mount['fstype'],
mount['opts']))
return allmounts
def __str__(self):
return "Node <{host}:{port}>".format(
host=self.addr,
port=self.port,
)
def __repr__(self):
return str(self)
def __eq__(self, other):
a = "{}:{}".format(self.addr, self.port)
b = "{}:{}".format(other.addr, other.port)
return a == b
def __hash__(self):
return hash((self.addr, self.port))
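if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself; the address and
    # password are placeholders for a reachable node.
    node = Node('192.168.1.10', password='admin')
    print(node.client.ping())
    for disk in node.disks.list():
        print(disk, disk.type)
    print(node.freeports(baseport=2000, nrports=3))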
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Node.py | Node.py |
import ssl
import json
import aioredis
import sys
import uuid
import time
import logging
import asyncio
logger = logging.getLogger('g8core')
class Response:
def __init__(self, client, id):
self._client = client
self._id = id
self._queue = 'result:{}'.format(id)
async def exists(self):
r = self._client._redis
flag = '{}:flag'.format(self._queue)
key_exists = await r.connection.execute('LKEYEXISTS', flag)
return bool(key_exists)
async def get(self, timeout=None):
if timeout is None:
timeout = self._client.timeout
r = self._client._redis
start = time.time()
maxwait = timeout
while maxwait > 0:
job_exists = await self.exists()
if not job_exists:
                raise RuntimeError("Job not found: %s" % self._id)
v = await r.brpoplpush(self._queue, self._queue, min(maxwait, 10))
if v is not None:
return json.loads(v.decode())
logger.debug('%s still waiting (%ss)', self._id, int(time.time() - start))
maxwait -= 10
raise TimeoutError()
class Pubsub:
def __init__(self, loop, host, port=6379, password="", db=0, ctx=None, timeout=None, testConnectionAttempts=3, callback=None):
socket_timeout = (timeout + 5) if timeout else 15
self.testConnectionAttempts = testConnectionAttempts
self._redis = None
self.host = host
self.port = port
self.password = password
self.db = db
if ctx is None:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.ssl = ctx
self.timeout = socket_timeout
self.loop = loop
async def default_callback(job_id, level, line, meta):
w = sys.stdout if level == 1 else sys.stderr
w.write(line)
w.write('\n')
self.callback = callback or default_callback
if not callable(self.callback):
raise Exception('callback must be callable')
async def get(self):
if self._redis is not None:
return self._redis
self._redis = await aioredis.create_redis((self.host, self.port),
loop=self.loop,
password=self.password,
db=self.db,
ssl=self.ssl,
timeout=self.timeout)
return self._redis
async def global_stream(self, queue, timeout=120):
if self._redis.connection.closed:
self._redis = await self.get()
data = await asyncio.wait_for(self._redis.blpop(queue, timeout=timeout), timeout=timeout)
_, body = data
payload = json.loads(body.decode())
message = payload['message']
line = message['message']
meta = message['meta']
job_id = payload['command']
await self.callback(job_id, meta >> 16, line, meta & 0xff)
async def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None):
if not id:
id = str(uuid.uuid4())
payload = {
'id': id,
'command': command,
'arguments': arguments,
'queue': queue,
'max_time': max_time,
'stream': stream,
'tags': tags,
}
self._redis = await self.get()
flag = 'result:{}:flag'.format(id)
await self._redis.rpush('core:default', json.dumps(payload))
if await self._redis.brpoplpush(flag, flag, 10) is None:
raise TimeoutError('failed to queue job {}'.format(id))
logger.debug('%s >> g8core.%s(%s)', id, command, ', '.join(("%s=%s" % (k, v) for k, v in arguments.items())))
return Response(self, id)
async def sync(self, command, args):
response = await self.raw(command, args)
result = await response.get()
if result["state"] != 'SUCCESS':
raise RuntimeError('invalid response: %s' % result["state"])
return json.loads(result["data"])
async def ping(self):
response = await self.sync('core.ping', {})
return response
async def subscribe(self, queue=None):
response = await self.sync('logger.subscribe', {'queue': queue})
return response
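if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself. The address, password
    # and queue name are placeholders; logger.subscribe is assumed to copy the
    # node logs into the given queue, which global_stream then drains.
    async def main(loop):
        pubsub = Pubsub(loop, '192.168.1.10', password='admin')
        await pubsub.ping()
        queue = 'logs:demo'
        await pubsub.subscribe(queue)
        while True:
            await pubsub.global_stream(queue)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))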
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/Pubsub.py | Pubsub.py |
import io
import time
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class StorageEngine:
"""storageEngine server"""
def __init__(self, name, container, bind='0.0.0.0:16379', data_dir='/mnt/data', master=None):
"""
        ardb-based storage server running inside a container
        @param bind: <host>:<port> address the server binds to
        @param data_dir: directory that holds the server data
        @param master: optional master StorageEngine this instance replicates from
"""
        self.name = name
        self.master = master
        self.container = container
        self.bind = bind
        self.port = int(bind.split(':')[1])
        self.data_dir = data_dir
        self._ays = None
@classmethod
def from_ays(cls, service, password=None):
logger.debug("create storageEngine from service (%s)", service)
from .Container import Container
container = Container.from_ays(service.parent, password)
if service.model.data.master != '':
master_service = service.aysrepo.serviceGet('storage_engine', service.model.data.master)
master = StorageEngine.from_ays(master_service, password)
else:
master = None
return cls(
name=service.name,
container=container,
bind=service.model.data.bind,
data_dir=service.model.data.homeDir,
master=master,
)
def _configure(self):
logger.debug("configure storageEngine")
buff = io.BytesIO()
self.container.client.filesystem.download('/etc/ardb.conf', buff)
content = buff.getvalue().decode()
# update config
content = content.replace('/mnt/data', self.data_dir)
content = content.replace('0.0.0.0:16379', self.bind)
mgmt_bind = "%s:%s" % (self.container.node.addr, self.port)
if self.bind != mgmt_bind:
content += "server[1].listen %s\n" % mgmt_bind
if self.master is not None:
_, port = self.master.bind.split(":")
content = content.replace('#slaveof 127.0.0.1:6379', 'slaveof {}:{}'.format(self.master.container.node.addr, port))
# make sure home directory exists
self.container.client.filesystem.mkdir(self.data_dir)
# upload new config
self.container.client.filesystem.upload('/etc/ardb.conf.used', io.BytesIO(initial_bytes=content.encode()))
def start(self, timeout=100):
if not self.container.is_running():
self.container.start()
running, _ = self.is_running()
if running:
return
logger.debug('start %s', self)
self._configure()
self.container.client.system('/bin/ardb-server /etc/ardb.conf.used', id="{}.{}".format("storage_engine", self.name))
# wait for storageEngine to start
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while not is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if not is_running:
            raise RuntimeError("storage server {} didn't start".format(self.name))
def stop(self, timeout=30):
if not self.container.is_running():
return
is_running, job = self.is_running()
if not is_running:
return
logger.debug('stop %s', self)
self.container.client.job.kill(job['cmd']['id'])
# wait for StorageEngine to stop
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if is_running:
            raise RuntimeError("storage server {} didn't stop".format(self.name))
def is_healthy(self):
import redis
client = redis.Redis(self.container.node.addr, self.port)
key = "keytest"
value = b"some test value"
if not client.set(key, value):
return False
result = client.get(key)
if result != value:
return False
client.delete(key)
if client.exists(key):
return False
return True
def is_running(self):
try:
if self.port not in self.container.node.freeports(self.port, 1):
for job in self.container.client.job.list():
if 'name' in job['cmd']['arguments'] and job['cmd']['arguments']['name'] == '/bin/ardb-server':
return (True, job)
return (False, None)
except Exception as err:
            if "invalid container id" in str(err):
return (False, None)
raise
@property
def ays(self):
if self._ays is None:
from JumpScale.sal.g8os.atyourservice.StorageCluster import storageEngineAys
self._ays = storageEngineAys(self)
return self._ays
def __str__(self):
return "storageEngine <{}>".format(self.name)
def __repr__(self):
return str(self)
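if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself. The node address,
    # password and container name are placeholders; the container flist is
    # expected to ship /bin/ardb-server and /etc/ardb.conf.
    from zeroos.orchestrator.sal.Node import Node
    node = Node('192.168.1.10', password='admin')
    container = node.containers.get('storage_1')
    engine = StorageEngine('engine_1', container, bind='0.0.0.0:16379')
    engine.start()
    print(engine.is_running(), engine.is_healthy())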
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/StorageEngine.py | StorageEngine.py |
from io import BytesIO
import logging
import yaml
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class EtcdCluster:
    """etcd cluster"""
def __init__(self, name, dialstrings):
self.name = name
self.dialstrings = dialstrings
self._ays = None
@classmethod
def from_ays(cls, service, password=None):
        logger.debug("create etcd cluster from service (%s)", service)
dialstrings = set()
for etcd_service in service.producers.get('etcd', []):
etcd = ETCD.from_ays(etcd_service, password)
dialstrings.add(etcd.clientBind)
return cls(
name=service.name,
dialstrings=",".join(dialstrings),
)
class ETCD:
    """etcd server"""
def __init__(self, name, container, serverBind, clientBind, peers, data_dir='/mnt/data'):
self.name = name
self.container = container
self.serverBind = serverBind
self.clientBind = clientBind
self.data_dir = data_dir
self.peers = ",".join(peers)
self._ays = None
@classmethod
def from_ays(cls, service, password=None):
        logger.debug("create etcd server from service (%s)", service)
from .Container import Container
container = Container.from_ays(service.parent, password)
return cls(
name=service.name,
container=container,
serverBind=service.model.data.serverBind,
clientBind=service.model.data.clientBind,
data_dir=service.model.data.homeDir,
peers=service.model.data.peers,
)
def start(self):
configpath = "/etc/etcd_{}.config".format(self.name)
config = {
"name": self.name,
"initial-advertise-peer-urls": "http://{}".format(self.serverBind),
"listen-peer-urls": "http://{}".format(self.serverBind),
"listen-client-urls": "http://{}".format(self.clientBind),
"advertise-client-urls": "http://{}".format(self.clientBind),
"initial-cluster": self.peers,
"data-dir": self.data_dir,
"initial-cluster-state": "new"
}
yamlconfig = yaml.safe_dump(config, default_flow_style=False)
configstream = BytesIO(yamlconfig.encode('utf8'))
configstream.seek(0)
self.container.client.filesystem.upload(configpath, configstream)
cmd = '/bin/etcd --config-file %s' % configpath
self.container.client.system(cmd, id="etcd.{}".format(self.name))
if not self.container.is_port_listening(int(self.serverBind.split(":")[1])):
raise RuntimeError('Failed to start etcd server: {}'.format(self.name))
def put(self, key, value):
if value.startswith("-"):
value = "-- %s" % value
if key.startswith("-"):
key = "-- %s" % key
cmd = '/bin/etcdctl \
--endpoints {etcd} \
put {key} "{value}"'.format(etcd=self.clientBind, key=key, value=value)
return self.container.client.system(cmd, env={"ETCDCTL_API": "3"}).get()
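if __name__ == '__main__':
    # Minimal usage sketch, not part of the SAL itself. The node address,
    # container name, bind addresses and the one-member peer list are
    # placeholders.
    from zeroos.orchestrator.sal.Node import Node
    node = Node('192.168.1.10', password='admin')
    container = node.containers.get('etcd_1')
    etcd = ETCD(
        name='etcd_1',
        container=container,
        serverBind='192.168.1.10:2380',
        clientBind='192.168.1.10:2379',
        peers=['etcd_1=http://192.168.1.10:2380'],
    )
    etcd.start()
    print(etcd.put('mykey', 'myvalue').stdout)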
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/ETCD.py | ETCD.py |
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/zeroos/orchestrator/sal/__init__.py | __init__.py |
import json
from js9 import j
from .StorageEngine import StorageEngine
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class StorageCluster:
"""StorageCluster is a cluster of StorageEngine servers"""
def __init__(self, label, nodes=None, disk_type=None):
"""
        @param label: string representing the name of the storage cluster
"""
self.label = label
self.name = label
self.nodes = nodes or []
self.filesystems = []
self.storage_servers = []
self.disk_type = disk_type
self.k = 0
self.m = 0
self._ays = None
@classmethod
def from_ays(cls, service, password):
        logger.debug("load storage cluster from service (%s)", service)
disk_type = str(service.model.data.diskType)
nodes = []
storage_servers = []
for storageEngine_service in service.producers.get('storage_engine', []):
            storage_server = StorageServer.from_ays(storageEngine_service, password)
            storage_servers.append(storage_server)
            if storage_server.node not in nodes:
                nodes.append(storage_server.node)
cluster = cls(label=service.name, nodes=nodes, disk_type=disk_type)
cluster.storage_servers = storage_servers
cluster.k = service.model.data.k
cluster.m = service.model.data.m
return cluster
@property
def dashboard(self):
board = StorageDashboard(self)
return board.template
def get_config(self):
data = {'dataStorage': [],
'metadataStorage': None,
'label': self.name,
'status': 'ready' if self.is_running() else 'error',
'nodes': [node.name for node in self.nodes]}
for storageserver in self.storage_servers:
if 'metadata' in storageserver.name:
data['metadataStorage'] = {'address': storageserver.storageEngine.bind}
else:
data['dataStorage'].append({'address': storageserver.storageEngine.bind})
return data
@property
def nr_server(self):
"""
Number of storage server part of this cluster
"""
return len(self.storage_servers)
def find_disks(self):
"""
        return a list of disks that are not used by a storage pool
        and have the disk type required for this cluster
"""
logger.debug("find available_disks")
cluster_name = 'sp_cluster_{}'.format(self.label)
available_disks = {}
def check_partition(disk):
for partition in disk.partitions:
for filesystem in partition.filesystems:
if filesystem['label'].startswith(cluster_name):
return True
for node in self.nodes:
for disk in node.disks.list():
# skip disks of wrong type
if disk.type.name != self.disk_type:
continue
# skip devices which have filesystems on the device
if len(disk.filesystems) > 0:
continue
# include devices which have partitions
if len(disk.partitions) == 0:
available_disks.setdefault(node.name, []).append(disk)
else:
if check_partition(disk):
                        # devices that have partitions with the correct label go at the beginning
available_disks.setdefault(node.name, []).insert(0, disk)
return available_disks
def start(self):
logger.debug("start %s", self)
for server in self.storage_servers:
server.start()
def stop(self):
logger.debug("stop %s", self)
for server in self.storage_servers:
server.stop()
def is_running(self):
        # TODO: improve this; what about when some servers are running and others are stopped?
for server in self.storage_servers:
if not server.is_running():
return False
return True
def health(self):
"""
Return a view of the state all storage server running in this cluster
example :
{
'cluster1_1': {'storageEngine': True, 'container': True},
'cluster1_2': {'storageEngine': True, 'container': True},
}
"""
health = {}
for server in self.storage_servers:
running, _ = server.storageEngine.is_running()
health[server.name] = {
'storageEngine': running,
'container': server.container.is_running(),
}
return health
def __str__(self):
return "StorageCluster <{}>".format(self.label)
def __repr__(self):
return str(self)
class StorageServer:
"""StorageEngine servers"""
def __init__(self, cluster):
self.cluster = cluster
self.container = None
self.storageEngine = None
@classmethod
def from_ays(cls, storageEngine_services, password=None):
storageEngine = StorageEngine.from_ays(storageEngine_services, password)
storage_server = cls(None)
storage_server.container = storageEngine.container
storage_server.storageEngine = storageEngine
return storage_server
@property
def name(self):
if self.storageEngine:
return self.storageEngine.name
return None
@property
def node(self):
if self.container:
return self.container.node
return None
def _find_port(self, start_port=2000):
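        # probe ports starting at start_port; return the first port with no
        # listener on this node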
while True:
if j.sal.nettools.tcpPortConnectionTest(self.node.addr, start_port, timeout=2):
start_port += 1
continue
return start_port
def start(self, timeout=30):
logger.debug("start %s", self)
if not self.container.is_running():
self.container.start()
ip, port = self.storageEngine.bind.split(":")
        self.storageEngine.bind = '{}:{}'.format(ip, self._find_port(int(port)))
self.storageEngine.start(timeout=timeout)
def stop(self, timeout=30):
logger.debug("stop %s", self)
self.storageEngine.stop(timeout=timeout)
self.container.stop()
def is_running(self):
container = self.container.is_running()
storageEngine, _ = self.storageEngine.is_running()
return (container and storageEngine)
def __str__(self):
return "StorageServer <{}>".format(self.container.name)
def __repr__(self):
return str(self)
class StorageDashboard:
def __init__(self, cluster):
self.cluster = cluster
self.store = 'statsdb'
def build_templating(self):
templating = {
"list": [],
"rows": []
}
return templating
def dashboard_template(self):
return {
"annotations": {
"list": []
},
"editable": True,
"gnetId": None,
"graphTooltip": 0,
"hideControls": False,
"id": None,
"links": [],
"rows": [],
"schemaVersion": 14,
"style": "dark",
"tags": [],
"time": {
"from": "now/d",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": self.cluster.name,
"version": 8
}
def build_row(self, panel):
template = {
"collapse": False,
"height": 295,
"panels": [],
"repeat": None,
"repeatIteration": None,
"repeatRowId": None,
"showTitle": False,
"title": "Dashboard Row",
"titleSize": "h6"
}
template["panels"] += panel
return template
def build_panel(self, title, target, panel_id, unit):
template = {
"aliasColors": {},
"bars": False,
"dashLength": 10,
"dashes": False,
"datasource": self.store,
"fill": 1,
"id": panel_id,
"legend": {
"avg": False,
"current": False,
"max": False,
"min": False,
"show": True,
"total": False,
"values": False
},
"lines": True,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"percentage": False,
"pointradius": 5,
"points": False,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"span": 6,
"stack": True,
"steppedLine": False,
"targets": [],
"thresholds": [],
"timeFrom": None,
"timeShift": None,
"tooltip": {
"shared": True,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": None,
"mode": "time",
"name": None,
"show": True,
"values": []
},
"yaxes": [
{
"format": unit,
"label": None,
"logBase": 1,
"max": None,
"min": None,
"show": True
},
{
"format": "short",
"label": None,
"logBase": 1,
"max": None,
"min": None,
"show": True
}
]
}
template["title"] = title
template["targets"].append(target)
return template
def build_target(self, measurement, disks):
template = {
"alias": "$tag_node/$tag_id",
"dsType": "influxdb",
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"node"
],
"type": "tag"
},
{
"params": [
"id"
],
"type": "tag"
},
{
"params": [
"none"
],
"type": "fill"
}
],
"orderByTime": "ASC",
"policy": "default",
"rawQuery": False,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [],
"type": "mean"
}
]
],
"tags": [
{
"key": "type",
"operator": "=",
"value": "phys"
}
]
}
template["measurement"] = measurement
for idx, disk in enumerate(disks):
tag = [
{
"key": "node",
"operator": "=",
"value": disk.split("_")[0]
},
{
"condition": "AND",
"key": "id",
"operator": "=",
"value": disk.split("_")[1]
}
]
if idx == 0:
tag[0]["condition"] = "AND"
else:
tag[0]["condition"] = "OR"
template["tags"] += tag
return template
@property
def template(self):
AGGREGATED_CONFIG = {
"Aggregated read IOPs": "disk.iops.read|m",
"Aggregated write IOPs": "disk.iops.write|m",
"Aggregated free size": "disk.size.free|m",
}
panel_id = 1
disks = set()
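        # storage server names encode node and disk; fields 1 and -3 of the
        # underscore-split name are used to build "<node>_<disk>" identifiers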
for server in self.cluster.storage_servers:
            parts = server.name.split("_")
            disks.add("{}_{}".format(parts[1], parts[-3]))
disks = list(disks)
panels = []
for title, measurement in AGGREGATED_CONFIG.items():
if 'size' in title:
partitions = [disk+'1' for disk in disks]
target = self.build_target(measurement, partitions)
panels.append(self.build_panel(title, target, panel_id, "decbytes"))
else:
target = self.build_target(measurement, disks)
panels.append(self.build_panel(title, target, panel_id, "iops"))
panel_id += 1
for disk in disks:
target = self.build_target("disk.iops.read|m", [disk])
panels.append(self.build_panel("Read IOPs", target, panel_id, "iops"))
panel_id += 1
target = self.build_target("disk.iops.write|m", [disk])
panels.append(self.build_panel("Write IOPs", target, panel_id, "iops"))
panel_id += 1
target = self.build_target("disk.size.free|m", [disk+'1'])
panels.append(self.build_panel("Free size", target, panel_id, "decbytes"))
panel_id += 1
template = self.dashboard_template()
for idx, panel in enumerate(panels):
if idx % 2 == 0:
row = self.build_row(panels[idx:idx+2])
template["rows"].append(row)
template = json.dumps(template)
return template
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/StorageCluster.py | StorageCluster.py |
from ..abstracts import AYSable
from js9 import j
class StoragePoolAys(AYSable):
def __init__(self, storagepool):
self._obj = storagepool
self.actor = 'storagepool'
def create(self, aysrepo):
try:
service = aysrepo.serviceGet(role='storagepool', instance=self._obj.name)
except j.exceptions.NotFound:
service = None
device_map, pool_status = self._obj.get_devices_and_status()
if service is None:
# create new service
actor = aysrepo.actorGet(self.actor)
args = {
'metadataProfile': self._obj.fsinfo['metadata']['profile'],
'dataProfile': self._obj.fsinfo['data']['profile'],
'devices': device_map,
'node': self._node_name,
'status': pool_status,
}
service = actor.serviceCreate(instance=self._obj.name, args=args)
else:
            # update model on existing service
service.model.data.init('devices', len(device_map))
for i, device in enumerate(device_map):
service.model.data.devices[i] = device
service.model.data.status = pool_status
service.saveAll()
return service
@property
def _node_name(self):
def is_valid_nic(nic):
for exclude in ['zt', 'core', 'kvm', 'lo']:
if nic['name'].startswith(exclude):
return False
return True
for nic in filter(is_valid_nic, self._obj.node.client.info.nic()):
if len(nic['addrs']) > 0 and nic['addrs'][0]['addr'] != '':
return nic['hardwareaddr'].replace(':', '')
        raise AttributeError("name not found for node {}".format(self._obj.node))
class FileSystemAys(AYSable):
def __init__(self, filesystem):
self._obj = filesystem
self.actor = 'filesystem'
def create(self, aysrepo):
actor = aysrepo.actorGet(self.actor)
args = {
'storagePool': self._obj.pool.name,
'name': self._obj.name,
# 'readOnly': ,FIXME
# 'quota': ,FIXME
}
return actor.serviceCreate(instance=self._obj.name, args=args)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/atyourservice/StoragePool.py | StoragePool.py |
from zeroos.orchestrator.sal import templates
import signal
import time
class HTTPServer:
def __init__(self, container, httpproxies):
self.container = container
self.httpproxies = httpproxies
def apply_rules(self):
# caddy
caddyconfig = templates.render('caddy.conf', httpproxies=self.httpproxies)
self.container.upload_content('/etc/caddy.conf', caddyconfig)
job = self.get_job()
if job:
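            # caddy reloads its configuration when it receives SIGUSR1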
self.container.client.job.kill(job['cmd']['id'], int(signal.SIGUSR1))
else:
self.container.client.system(
'caddy -agree -conf /etc/caddy.conf', stdin='\n', id='http.{}'.format(self.container.name))
start = time.time()
while start + 10 > time.time():
if self.is_running():
return True
time.sleep(0.5)
raise RuntimeError("Failed to start caddy server")
def get_job(self):
for job in self.container.client.job.list():
cmd = job['cmd']
if cmd['command'] != 'core.system':
continue
if cmd['arguments']['name'] == 'caddy':
return job
def is_running(self):
for port in self.container.client.info.port():
if port['network'].startswith('tcp') and port['port'] in [80, 443]:
return True
return False
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/gateway/http.py | http.py |
import time
class CloudInit:
def __init__(self, container, config):
self.container = container
self.config = config
self.CONFIGPATH = "/etc/cloud-init"
def apply_config(self):
self.cleanup(self.config.keys())
for key, value in self.config.items():
fpath = "%s/%s" % (self.CONFIGPATH, key)
self.container.upload_content(fpath, value)
if not self.is_running():
self.start()
def cleanup(self, macaddresses):
configs = self.container.client.filesystem.list(self.CONFIGPATH)
for config in configs:
if config["name"] not in macaddresses:
self.container.client.filesystem.remove("%s/%s" % (self.CONFIGPATH, config["name"]))
def start(self):
if not self.is_running():
self.container.client.system(
'cloud-init-server \
-bind 127.0.0.1:8080 \
-config {config}'
.format(config=self.CONFIGPATH),
id='cloudinit.{}'.format(self.container.name))
start = time.time()
        while start + 10 > time.time():
if self.is_running():
return
time.sleep(0.5)
raise RuntimeError('Failed to start cloudinit server')
def is_running(self):
for port in self.container.client.info.port():
if port['network'] == 'tcp' and port['port'] == 8080 and port['ip'] == '127.0.0.1':
return True
return False
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/gateway/cloudinit.py | cloudinit.py |
from zeroos.orchestrator.sal import templates
import ipaddress
class Network:
def __init__(self, iface, cidr):
self.iface = iface
ipiface = ipaddress.IPv4Interface(cidr)
self.ipaddress = str(ipiface.ip)
self.subnet = str(ipiface.network)
class Firewall:
def __init__(self, container, publicnetwork, privatenetworks, forwards):
        '''
        container: the gateway container in which the firewall rules are applied
        publicnetwork: Network object describing the public interface
        privatenetworks: list of Network objects for the private interfaces
        forwards: port-forwarding rules rendered into the nftables config
        '''
self.container = container
self.publicnetwork = publicnetwork
self.privatenetworks = privatenetworks
self.forwards = forwards
def apply_rules(self):
# nftables
nftables = templates.render('nftables.conf',
privatenetworks=self.privatenetworks,
publicnetwork=self.publicnetwork,
portforwards=self.forwards)
self.container.upload_content('/etc/nftables.conf', nftables)
job = self.container.client.system('nft -f /etc/nftables.conf').get()
if job.state != 'SUCCESS':
raise RuntimeError("Failed to apply nftables {} {}".format(job.stdout, job.stderr))
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/gateway/firewall.py | firewall.py |
import signal
import time
from zeroos.orchestrator.sal import templates
DNSMASQ = '/bin/dnsmasq --conf-file=/etc/dnsmasq.conf -d'
class DHCP:
def __init__(self, container, domain, dhcps):
self.container = container
self.domain = domain
self.dhcps = dhcps
def apply_config(self):
dnsmasq = templates.render('dnsmasq.conf', domain=self.domain, dhcps=self.dhcps)
self.container.upload_content('/etc/dnsmasq.conf', dnsmasq)
dhcp = templates.render('dhcp', dhcps=self.dhcps)
self.container.upload_content('/etc/dhcp', dhcp)
for process in self.container.client.process.list():
if 'dnsmasq' in process['cmdline']:
self.container.client.process.kill(process['pid'], signal.SIGTERM)
start = time.time()
while start + 10 > time.time():
if not self.is_running():
break
time.sleep(0.2)
break
self.container.client.system(DNSMASQ, id='dhcp.{}'.format(self.container.name))
# check if command is listening for dhcp
start = time.time()
while start + 10 > time.time():
if self.is_running():
break
time.sleep(0.2)
else:
raise RuntimeError('Failed to run dnsmasq')
    def is_running(self):
        for port in self.container.client.info.port():
            if port['network'] == 'udp' and port['port'] == 53:
                return True
        return False
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/gateway/dhcp.py | dhcp.py |
import re
import os
import datetime
from ..healthcheck import HealthCheckRun
descr = """
Rotate known log files if their size hits 10G or more
"""
class RotateLogs(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__('log-rotator', 'Log Rotator', 'System Load', resource)
self.node = node
    def run(self, locations=None, limit=10):
message = {
'id': 'log-rotator',
'status': 'OK',
'text': 'Logs did not reach {limit}G'.format(limit=limit)
}
        if locations is None:
            locations = ['/var/log']
        elif "/var/log" not in locations:
            locations.append("/var/log")
logs = []
try:
# Get total size for all log files
log_size = get_log_size(self.node, locations)
# Rotate logs if they are larger than the limit
if log_size/(1024 * 1024 * 1024) >= limit: # convert bytes to GIGA
# Rotate core.log
for location in locations:
# Get Files for this location
location_files = get_files(self.node, location, [])
logs.extend(location_files)
for file_path in logs:
if file_path == '/var/log/core.log':
continue
# Delete old rotated files to free up some space
# match file.*.date.time
                    if re.match(r".*\d{8}-\d{6}", file_path):
self.node.client.filesystem.remove(file_path)
else:
new_path = "%s.%s" % (file_path, datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
# Rotate the new logs
self.node.client.filesystem.move(file_path, new_path)
# Create a new EMPTY file with the same name
fd = self.node.client.filesystem.open(file_path, 'x')
self.node.client.filesystem.close(fd)
self.node.client.logger.reopen()
message['text'] = 'Logs cleared'
        except Exception:
            message['text'] = "Error happened, cannot clear logs"
            message['status'] = "ERROR"
self.add_message(**message)
def get_files(node, location, files=None):
    if files is None:
        files = []
for item in node.client.filesystem.list(location):
if not item['is_dir']:
files.append(os.path.join(location, item['name']))
else:
files = get_files(node, os.path.join(location, item['name']), files)
return files
def get_log_size(node, locations):
size = 0
for location in locations:
items = node.client.filesystem.list(location)
for item in items:
size += item['size']
return size
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/log_rotator.py | log_rotator.py |
from ..healthcheck import IPMIHealthCheck
descr = """
Checks the fans of a node using IPMItool.
Result will be shown in the "Hardware" section of the Grid Portal / Status Overview / Node Status page.
"""
class Fan(IPMIHealthCheck):
def __init__(self, node):
self.node = node
resource = '/nodes/{}'.format(node.name)
super().__init__(id='fan', name="Fan", category="Hardware", resource=resource)
def run(self, container):
out = self.execute_ipmi(container, """ipmitool sdr type "Fan" """)
if out:
# SAMPLE:
# root@du-conv-3-01:~# ipmitool sdr type "Fan"
# FAN1 | 41h | ok | 29.1 | 5000 RPM
# FAN2 | 42h | ns | 29.2 | No Reading
# FAN3 | 43h | ok | 29.3 | 4800 RPM
# FAN4 | 44h | ns | 29.4 | No Reading
for line in out.splitlines():
parts = [part.strip() for part in line.split("|")]
id_, sensorstatus, text = parts[0], parts[2], parts[-1]
if sensorstatus == "ns" and "no reading" in text.lower():
self.add_message(id=id_, status='SKIPPED', text="Fan {id} has no reading ({text})".format(id=id_, text=text))
elif sensorstatus != "ok" and "no reading" not in text.lower():
self.add_message(id=id_, status='WARNING', text="Fan {id} has problem ({text})".format(id=id_, text=text))
elif sensorstatus == 'ok':
self.add_message(id=id_, status="OK", text="Fan {id} is working at ({text})".format(id=id_, text=text))
else:
self.add_message(id="SKIPPED", status="SKIPPED", text="NO fan information available")
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/fan.py | fan.py |
import psutil
from ..healthcheck import HealthCheckRun
descr = """
Check open file descriptors for each node process, if it exceeds 90% of the soft limit, it raises a warning,
if it exceeds 90% of the hard limit, it raises an error.
"""
class OpenFileDescriptor(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__('openfile-descriptors', 'Open File Descriptors', 'System Load', resource)
self.node = node
def run(self):
for process in self.node.client.process.list():
for rlimit in process['rlimit']:
if rlimit['resource'] == psutil.RLIMIT_NOFILE:
pid = str(process['pid'])
if (0.9 * rlimit['soft']) <= process['ofd'] < (0.9 * rlimit['hard']):
self.add_message(pid, 'WARNING', 'Open file descriptors for process %s exceeded 90%% of the soft limit' % pid)
elif process['ofd'] >= (0.9 * rlimit['hard']):
self.add_message(pid, 'ERROR', 'Open file descriptors for process %s exceeded 90%% of the hard limit' % pid)
break
if not self._messages:
self.add_message('-1', 'OK', 'Open file descriptors for all processes are within limit')
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/openfiledescriptors.py | openfiledescriptors.py |
import time
from ..healthcheck import HealthCheckRun
descr = """
Monitors CPU context switching
"""
class ContextSwitch(HealthCheckRun):
def __init__(self, node, warn=600000, error=1000000):
super().__init__('context-switch', 'Context Switch', 'Hardware', '/nodes/{}'.format(node.name))
self.node = node
self._warn = warn
self._error = error
def run(self):
client = self.node.client
state = client.aggregator.query('machine.CPU.contextswitch').get('machine.CPU.contextswitch')
if state is None:
# nothing to check yet
return self.add_message(self.id, 'WARNING', 'Number of context-switches per second is not collected yet')
# time of last reported value
last_time = state['last_time']
current = state['current']['300']
# start time of the current 5min sample
current_time = current['start']
if current_time < time.time() - (10*60):
return self.add_message(self.id, 'WARNING', 'Last collected context-switch are too far in the past')
# calculate avg per second
value = current['avg'] / (last_time - current_time)
status = 'OK'
text = 'Context-switches are okay'
if value >= self._error:
status = 'ERROR'
            text = 'Context-switches exceeded error threshold of {} ({})'.format(self._error, value)
elif value >= self._warn:
status = 'WARNING'
            text = 'Context-switches exceeded warning threshold of {} ({})'.format(self._warn, value)
return self.add_message(self.id, status, text)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/context_switch.py | context_switch.py |
from ..healthcheck import HealthCheckRun
descr = """
Checks the stability of the management network: verifies MTU consistency
across nodes and measures ping reachability and round-trip time to each node.
"""
class NetworkStability(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__('networkstability', 'Network Stability Check', 'Network',resource)
self.node = node
def run(self, nodes):
nic = self.node.get_nic_by_ip(self.node.addr)
if nic is None:
raise LookupError("Couldn't get the management nic")
jobs = []
for node in nodes:
other_nic = node.get_nic_by_ip(node.addr)
if other_nic is not None:
if nic['mtu'] != other_nic['mtu']:
self.add_message('{}_mtu'.format(node.name), 'ERROR', 'The management interface has mtu {} which is different than node {} which is {}'.format(nic['mtu'], node.name, other_nic['mtu']))
else:
self.add_message('{}_mtu'.format(node.name), 'OK', 'The management interface has mtu {} is the same as node {}'.format(nic['mtu'], node.name, other_nic['mtu']))
else:
self.add_message('{}_mtu'.format(node.name), 'ERROR', "Couldn't get the management nic for node {}".format(node.name))
jobs.append(self.node.client.system('ping -I {} -c 10 -W 1 -q {}'.format(self.node.addr, node.addr), max_time=20))
for node, job in zip(nodes, jobs):
res = job.get().stdout.split('\n')
perc = 100 - int(res[2].split(',')[-1].strip().split()[0][:-1])
            if perc < 70:
                self.add_message('{}_ping_perc'.format(node.name), 'ERROR', "Can reach node {} with only {}% success".format(node.name, perc))
            elif perc < 90:
                self.add_message('{}_ping_perc'.format(node.name), 'WARNING', "Can reach node {} with only {}% success".format(node.name, perc))
            else:
                self.add_message('{}_ping_perc'.format(node.name), 'OK', "Can reach node {} with {}% success".format(node.name, perc))
if perc == 0:
self.add_message('{}_ping_rt'.format(node.name), 'ERROR', "Can't reach node {}".format(node.name))
else:
rt = float(res[3].split('/')[3])
if rt > 200:
self.add_message('{}_ping_rt'.format(node.name), 'ERROR', "Round-trip time to node {} is {}".format(node.name, rt))
elif rt > 10:
self.add_message('{}_ping_rt'.format(node.name), 'WARNING', "Round-trip time to node {} is {}".format(node.name, rt))
else:
self.add_message('{}_ping_rt'.format(node.name), 'OK', "Round-trip time to node {} is {}".format(node.name, rt))
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/networkstability.py | networkstability.py |
from ..healthcheck import IPMIHealthCheck
descr = """
Checks temperature of the system.
Result will be shown in the "Temperature" section of the Grid Portal / Status Overview / Node Status page.
"""
class Temperature(IPMIHealthCheck):
WARNING_TRIPPOINT = 70
ERROR_TRIPPOINT = 90
def __init__(self, node):
self.node = node
resource = '/nodes/{}'.format(node.name)
super().__init__('temperature', 'Node Temperature Check', 'Hardware', resource)
def run(self, container):
out = self.execute_ipmi(container, "ipmitool sdr type 'Temp'")
        if out:
            # SAMPLE:
            # root@du-conv-3-01:~# ipmitool sdr type "Temp"
            # Temp | 0Eh | ok | 3.1 | 37 degrees C
            # Temp | 0Fh | ok | 3.2 | 34 degrees C
            # Inlet Temp | B1h | ok | 64.96 | 28 degrees C
            # Exhaust Temp | B2h | ns | 144.96 | Disabled
            for line in out.splitlines():
                if "|" in line:
                    parts = [part.strip() for part in line.split("|")]
                    id_, sensorstatus, message = parts[0], parts[2], parts[-1]
                    if sensorstatus == "ns" and "no reading" in message.lower():
                        continue
                    if sensorstatus != "ok" and "no reading" not in message.lower():
                        self.add_message(**self.get_message(sensor=id_, status='WARNING', message=message))
                        continue
                    temperature = int(message.split(" ", 1)[0])
                    self.add_message(**self.get_message(sensor=id_, status=sensorstatus, message=message, temperature=temperature))
else:
self.add_message(**self.get_message(status="SKIPPED", message="NO temp information available"))
def get_message(self, sensor="", status='OK', message='', temperature=0):
result = {
"status": status.upper(),
"text": "%s: %s" % (sensor, message),
"id": sensor,
}
if status != "OK":
return result
        if temperature >= self.ERROR_TRIPPOINT:
            result["status"] = "ERROR"
        elif temperature >= self.WARNING_TRIPPOINT:
            result["status"] = "WARNING"
return result
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/temperature.py | temperature.py |
from ..healthcheck import HealthCheckRun
descr = """
Monitors whether processes have a high number of threads per hyperthread
"""
class Threads(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__(id="threads",
name="Node thread per hyperthread",
category="System Load",
resource=resource)
self.node = node
def run(self):
hyperthread_count = len(self.node.client.info.cpu())
thread_info = self.node.client.aggregator.query("machine.process.threads", type="phys")
thread_info = thread_info.get("machine.process.threads", {})
message = {}
if not thread_info:
message = {
"status": "WARNING",
"id": "THREADS",
"text": "Number of threads is not available",
}
self.add_message(**message)
return
avg_thread = thread_info["current"]["300"]["avg"] / hyperthread_count
message["id"] = "THREADS"
        if avg_thread > 400:
            message["status"] = "ERROR"
            message["text"] = "Average threads per hyperthread is critical"
        elif avg_thread > 300:
            message["status"] = "WARNING"
            message["text"] = "Average threads per hyperthread is high"
        else:
            message["status"] = "OK"
            message["text"] = "Average threads per hyperthread is normal"
self.add_message(**message)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/threads.py | threads.py |
from ..healthcheck import HealthCheckRun
from js9 import j
descr = """
Clean up ssh daemons and tcp services left over from migration
"""
class SSHCleanup(HealthCheckRun):
def __init__(self, node, job):
resource = '/nodes/{}'.format(node.name)
super().__init__('ssh-cleanup', 'SSH Cleanup', 'System Load', resource)
self.node = node
self.service = job.service
self.job = job
def run(self):
status = 'OK'
        text = 'Migration Cleanup Successful'
finished = []
try:
for job in self.service.aysrepo.jobsList():
job_dict = job.to_dict()
if job_dict['actionName'] == 'processChange' and job_dict['actorName'] == 'vm':
if job_dict['state'] == 'running':
continue
vm = self.service.aysrepo.serviceGet(instance=job_dict['serviceName'], role=job_dict['actorName'])
finished.append("ssh.config_%s" % vm.name)
for proc in self.node.client.process.list():
for partial in finished:
if partial not in proc['cmdline']:
continue
config_file = proc['cmdline'].split()[-1]
port = config_file.split('_')[-1]
self.node.client.process.kill(proc['pid'])
tcp_name = "tcp_%s_%s" % (self.node.name, port)
tcp_service = self.service.aysrepo.serviceGet(role='tcp', instance=tcp_name)
j.tools.async.wrappers.sync(tcp_service.executeAction("drop"), context=self.job.context)
tcp_service.delete()
                    if self.node.client.filesystem.exists(config_file):
                        self.node.client.filesystem.remove(config_file)
        except Exception:
            text = "Error happened, cannot clean up ssh processes"
            status = "ERROR"
self.add_message(self.id, status, text) | 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/ssh_cleanup.py | ssh_cleanup.py |
from ..healthcheck import IPMIHealthCheck
descr = """
Checks the power redundancy of a node using IPMItool.
Result will be shown in the "Hardware" section of the Grid Portal / Status Overview / Node Status page.
"""
class PowerSupply(IPMIHealthCheck):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__(id='pw-supply', name='Power Supply', category="Hardware", resource=resource)
self.node = node
self.ps_errmsgs = [
"Power Supply AC lost",
"Failure detected",
"Predictive failure",
"AC lost or out-of-range",
"AC out-of-range, but present",
"Config Erro",
"Power Supply Inactive"]
def run(self, container):
ps_errmsgs = [x.lower() for x in self.ps_errmsgs if x.strip()]
linehaserrmsg = lambda line: any([x in line.lower() for x in ps_errmsgs])
out = self.execute_ipmi(container, """ipmitool -c sdr type "Power Supply" """)
if out:
# SAMPLE 1:
# root@du-conv-3-01:~# ipmitool -c sdr type "Power Supply"
# PS1 Status , C8h , ok , 10.1 , Presence detected
# PS2 Status,C9h , ok , 10.2 , Presence detected
# SAMPLE 2:
# root@stor-04:~# ipmitool -c sdr type "Power Supply"
# PSU1_Status , DEh , ok , 10.1 , Presence detected
# PSU2_Status , DFh , ns , 10.2 , No Reading
# PSU3_Status , E0h , ok , 10.3 , Presence detected
# PSU4_Status , E1h , ns , 10.4 , No Reading
# PSU Redundancy , E6h , ok , 21.1 , Fully Redundant
# SAMPLE 3:
# root@stor-01:~# ipmitool -c sdr type "Power Supply"
# PSU1_Status , DEh , ok , 10.1 , Presence detected , Power Supply AC lost
# PSU2_Status , DFh , ns , 10.2 , No Reading
# PSU3_Status , E0h , ok , 10.3 , Presence detected
# PSU4_Status , E1h , ok , 10.4 , Presence detected
# PSU Redundancy , E6h , ok , 21.1 , Redundancy Lost
# PSU Alert , 16h , ns , 208.1 , Event-Only
psu_redun_in_out = "PSU Redundancy".lower() in out.lower()
is_fully_redundant = True if "fully redundant" in out.lower() else False
for line in out.splitlines():
if "status" in line.lower():
parts = [part.strip() for part in line.split(",")]
id_, presence = parts[0], parts[-1]
id_ = id_.strip("Status").strip("_").strip() # clean the power supply name.
if linehaserrmsg(line):
if psu_redun_in_out and is_fully_redundant:
self.add_message(id=id_, status='SKIPPED', text="Power redundancy problem on %s (%s)" % (id_, presence))
else:
self.add_message(id=id_, status='WARNING', text="Power redundancy problem on %s (%s)" % (id_, presence))
else:
self.add_message(id=id_, status='OK', text="Power supply %s is OK" % id_)
else:
self.add_message(id="SKIPPED", status='SKIPPED', text="No data for Power Supplies")
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/powersupply.py | powersupply.py |
import time
from ..healthcheck import HealthCheckRun
descr = """
Monitors the number of CPU interrupts per second
"""
class Interrupts(HealthCheckRun):
    def __init__(self, node, warn=8000, error=10000):
resource = '/nodes/{}'.format(node.name)
super().__init__('interrupts', 'CPU Interrupts', 'Hardware', resource)
self._warn = warn
self._error = error
self.node = node
def _get(self):
client = self.node.client
state = client.aggregator.query('machine.CPU.interrupts').get('machine.CPU.interrupts')
if state is None:
# nothing to check yet
return {
'id': self.id,
'status': 'WARNING',
'text': 'Number of interrupts per second is not collected yet',
}
# time of last reported value
last_time = state['last_time']
current = state['current']['300']
# start time of the current 5min sample
current_time = current['start']
if current_time < time.time() - (10*60):
return {
'id': self.id,
'status': 'WARNING',
'text': 'Last collected interrupts are too far in the past',
}
# calculate avg per second
value = current['avg'] / (last_time - current_time)
status = 'OK'
text = 'Interrupts are okay'
if value >= self._error:
status = 'ERROR'
text = 'Interrupts exceeded error threshold of {} ({})'.format(self._error, value)
elif value >= self._warn:
status = 'WARNING'
text = 'Interrupts exceeded warning threshold of {} ({})'.format(self._warn, value)
return {
'id': self.id,
'status': status,
'text': text,
}
def run(self):
self.add_message(**self._get())
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/interrupts.py | interrupts.py |
from js9 import j
from ..healthcheck import HealthCheckRun
descr = """
Checks average memory and CPU usage/load. If average per hour is higher than expected an error condition is thrown.
For both memory and CPU usage throws WARNING if more than 80% used and throws ERROR if more than 95% used.
Result will be shown in the "System Load" section of the Grid Portal / Status Overview / Node Status page.
"""
class Memory(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__('memory', 'Memory', 'System Load', resource)
self.node = node
def run(self):
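        # info.mem() presumably reports bytes; convert to MiB to match the
        # aggregator's machine.memory.ram.available series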
total_mem = self.node.client.info.mem()['total']/(1024*1024)
mem_history = self.node.client.aggregator.query('machine.memory.ram.available').get('machine.memory.ram.available', {}).get('history', {})
if '3600' not in mem_history:
self.add_message('MEMORY', 'WARNING', 'Average memory load is not collected yet')
else:
avg_available_mem = mem_history['3600'][-1]['avg']
avg_used_mem = total_mem - avg_available_mem
avg_mem_percent = avg_used_mem/float(total_mem) * 100
self.add_message(**get_message('memory', avg_mem_percent))
class CPU(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__('CPU', 'CPU', 'System Load', resource)
self.node = node
def run(self):
cpu_percent = 0
count = 0
cpu_usage = self.node.client.aggregator.query('machine.CPU.percent')
for cpu, data in cpu_usage.items():
if '3600' not in data['history']:
continue
cpu_percent += (data['history']['3600'][-1]['avg'])
count += 1
if count == 0:
self.add_message('CPU', 'WARNING', 'Average CPU load is not collected yet')
else:
cpu_avg = cpu_percent / float(count)
self.add_message(**get_message('cpu', cpu_avg))
def get_message(type_, percent):
message = {
'id': type_.upper(),
'status': 'OK',
'text': r'Average %s load during last hour was: %.2f%%' % (type_.upper(), percent),
}
if percent > 95:
message['status'] = 'ERROR'
message['text'] = r'Average %s load during last hour was too high' % (type_.upper())
elif percent > 80:
message['status'] = 'WARNING'
message['text'] = r'Average %s load during last hour was too high' % (type_.upper())
return message
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/cpu_mem_core.py | cpu_mem_core.py |
from ..healthcheck import HealthCheckRun
descr = """
Monitors if disk usage is too high
"""
class DiskUsage(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__('disk-usage', 'Disk Usage Check', 'Hardware', resource)
self.node = node
def run(self):
fs = self.node.client.btrfs.list()
disks = {d['path']: d for f in fs for d in f['devices']}
for path, disk in disks.items():
usage = 100.0 * disk['used'] / disk['size']
if usage > 95:
self.add_message('{}_usage'.format(path), 'ERROR', "Disk usage of {} is {:.2%}".format(path, usage / 100))
elif usage > 90:
self.add_message('{}_usage'.format(path), 'WARNING', "Disk usage of {} is {:.2%}".format(path, usage / 100))
else:
self.add_message('{}_usage'.format(path), 'OK', "Disk usage of {} is {:.2%}".format(path, usage / 100))
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/diskusage.py | diskusage.py |
from ..healthcheck import HealthCheckRun
descr = """
Check the bandwidth consumption of the network
"""
class NetworkLoad(HealthCheckRun):
def __init__(self, node):
self.node = node
resource = '/nodes/{}'.format(node.name)
super().__init__(id='network-load', name="Network Load Check", category="Hardware", resource=resource)
def run(self):
nics_speed = {nic['name']: nic['speed'] for nic in self.node.client.info.nic()}
self.get_network_data('incoming', nics_speed)
self.get_network_data('outgoing', nics_speed)
def get_network_data(self, direction, nics_speed):
throughput = self.node.client.aggregator.query('network.throughput.%s' % direction)
for nic in throughput:
throughput_history = throughput[nic].get('history', {}).get('3600', [])
if throughput_history:
last_throughput = throughput_history[-1].get('avg', 0)
nic_name = nic.split("/")[-1]
nic_speed = nics_speed.get(nic_name, 0)
if nic_speed > 0:
nic_speed = nic_speed / 8
percent = (last_throughput/ float(nic_speed)) * 100
                    if percent > 90:
                        self.add_message(id="%s_%s" % (nic_name, direction), status="ERROR", text='Nic {} {} bandwidth is {:.2f}%'.format(nic_name, direction, percent))
                    elif percent > 80:
                        self.add_message(id="%s_%s" % (nic_name, direction), status="WARNING", text='Nic {} {} bandwidth is {:.2f}%'.format(nic_name, direction, percent))
                    else:
                        self.add_message(id="%s_%s" % (nic_name, direction), status="OK", text='Nic {} {} bandwidth is {:.2f}%'.format(nic_name, direction, percent))
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/networkload.py | networkload.py |
import re
from ..healthcheck import HealthCheckRun
descr = """
Monitors if a network bond (if there is one) has both (or more) interfaces properly active.
"""
class NetworkBond(HealthCheckRun):
def __init__(self, node):
resource = '/nodes/{}'.format(node.name)
super().__init__('network-bond', 'Network Bond Check', 'Hardware', resource)
self.node = node
def run(self):
ovs = "{}_ovs".format(self.node.name)
try:
container = self.node.containers.get(ovs)
except LookupError:
# no ovs configured nothing to report on
return
jobresult = container.client.system('ovs-appctl bond/show').get()
if jobresult.state == 'ERROR':
return
output = jobresult.stdout
bonds = []
bond = {}
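        # parse "ovs-appctl bond/show" output: a "---- bond-<name> ----" header
        # starts each bond section, followed by "slave <name>: <state>" lines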
        for match in re.finditer(r'(?:---- bond-(?P<bondname>\w+) ----)?.+?\n(?:slave (?:(?P<slavename>\w+): (?P<state>\w+)))', output, re.DOTALL):
groups = match.groupdict()
slave = {'name': groups['slavename'], 'state': groups['state']}
if groups['bondname']:
if bond:
bonds.append(bond)
bond = {'name': groups['bondname']}
bond.setdefault('slaves', []).append(slave)
if bond:
bonds.append(bond)
for bond in bonds:
badslaves = []
for slave in bond['slaves']:
if slave['state'] != 'enabled':
badslaves.append(slave['name'])
state = 'OK'
if badslaves:
                msg = 'Bond: {} has problems with slaves {}'.format(bond['name'], ', '.join(badslaves))
                state = 'ERROR' if len(badslaves) == len(bond['slaves']) else 'WARNING'
            else:
                msg = 'Bond: {}, all slaves are ok'.format(bond['name'])
            self.add_message(bond['name'], state, msg)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/healthchecks/networkbond.py | networkbond.py |
import signal
import time
import requests
from js9 import j
class Grafana:
def __init__(self, container, ip, port, url):
self.container = container
self.ip = ip
self.port = port
self.url = url
self.client = j.clients.grafana.get(url='http://%s:%d' % (
ip, port), username='admin', password='admin')
def apply_config(self):
f = self.container.client.filesystem.open('/opt/grafana/conf/defaults.ini')
try:
template = self.container.client.filesystem.read(f)
finally:
self.container.client.filesystem.close(f)
template = template.replace(b'3000', str(self.port).encode())
if self.url:
template = template.replace(b'root_url = %(protocol)s://%(domain)s:%(http_port)s/', b'root_url = %s' % self.url.encode())
self.container.client.filesystem.mkdir('/etc/grafana/')
self.container.upload_content('/etc/grafana/grafana.ini', template)
def is_running(self):
for process in self.container.client.process.list():
if 'grafana-server' in process['cmdline']:
return True, process['pid']
return False, None
def stop(self, timeout=30):
is_running, pid = self.is_running()
if not is_running:
return
self.container.client.process.kill(pid, signal.SIGTERM)
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if is_running:
raise RuntimeError('Failed to stop grafana.')
if self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.drop_port(self.port)
def start(self, timeout=30):
is_running, _ = self.is_running()
if is_running:
return
self.apply_config()
if not self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.open_port(self.port)
self.container.client.system(
'grafana-server -config /etc/grafana/grafana.ini -homepath /opt/grafana')
time.sleep(1)
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while not is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if not is_running:
if self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.drop_port(self.port)
raise RuntimeError('Failed to start grafana.')
def add_data_source(self, database, name, ip, port, count):
data = {
'type': 'influxdb',
'access': 'proxy',
'database': database,
'name': name,
'url': 'http://%s:%u' % (ip, port),
'user': 'admin',
'password': 'passwd',
'default': True,
}
        now = time.time()
        while time.time() - now < 10:
            try:
                self.client.addDataSource(data)
                # stop once grafana reports the newly added data source
                if len(self.client.listDataSources()) == count + 1:
                    break
            except requests.exceptions.ConnectionError:
                time.sleep(1)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/grafana/grafana.py | grafana.py |
import signal
import time
from zeroos.orchestrator.sal import templates
from js9 import j
class InfluxDB:
def __init__(self, container, ip, port):
self.container = container
self.ip = ip
self.port = port
def apply_config(self):
influx_conf = templates.render('influxdb.conf', ip=self.ip, port=self.port)
self.container.upload_content('/etc/influxdb/influxdb.conf', influx_conf)
def is_running(self):
for process in self.container.client.process.list():
if 'influxd' in process['cmdline']:
try:
self.list_databases()
                except Exception:
return False, process['pid']
else:
return True, process['pid']
return False, None
def stop(self, timeout=30):
is_running, pid = self.is_running()
if not is_running:
return
self.container.client.process.kill(pid, signal.SIGTERM)
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if is_running:
raise RuntimeError('Failed to stop influxd.')
if self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.drop_port(self.port)
def start(self, timeout=30):
is_running, _ = self.is_running()
if is_running:
return
self.apply_config()
if not self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.open_port(self.port)
self.container.client.system('influxd')
time.sleep(1)
start = time.time()
end = start + timeout
is_running, _ = self.is_running()
while not is_running and time.time() < end:
time.sleep(1)
is_running, _ = self.is_running()
if not is_running:
if self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.drop_port(self.port)
raise RuntimeError('Failed to start influxd.')
def list_databases(self):
client = j.clients.influxdb.get(self.ip, port=self.port)
return client.get_list_database()
def create_databases(self, databases):
client = j.clients.influxdb.get(self.ip, port=self.port)
for database in databases:
client.create_database(database)
def drop_databases(self, databases):
client = j.clients.influxdb.get(self.ip, port=self.port)
for database in databases:
client.drop_database(database)
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/influxdb/influxdb.py | influxdb.py |
import jinja2
import os
def get(templatename):
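    """Load a Jinja2 template that ships next to this module."""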
templatepath = os.path.dirname(__file__)
loader = jinja2.FileSystemLoader(templatepath)
env = jinja2.Environment(loader=loader)
return env.get_template(templatename)
def render(templatename, **kwargs):
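    """Render a template with the given kwargs; a trailing newline is appended."""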
env = get(templatename)
return env.render(**kwargs) + '\n'
| 0-orchestrator | /0-orchestrator-1.1.0a7.tar.gz/0-orchestrator-1.1.0a7/zeroos/orchestrator/sal/templates/__init__.py | __init__.py |
# A lib for creating tfrecords
## TODO:
1. Python 2.7 support.
2. Create a command-line interface.
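
## Usage
A minimal sketch of the intended API (paths below are hypothetical placeholders; the CSV maps each image file to its class and must have a header row):

```python
from image2tfrecords.image2tfrecords import Image2TFRecords

img2tf = Image2TFRecords(
    "/path/to/images",           # directory containing the images
    "/path/to/image2class.csv",  # CSV with a header, e.g. "filename,class"
    dataset_name="mydata",
    val_size=0.2,
    test_size=0.1)
img2tf.create_tfrecords(output_dir="/tmp/tfrecords")
```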
| 0.0.1 | /0.0.1-0.0.1.tar.gz/0.0.1-0.0.1/README.rst | README.rst |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, 'VERSION')) as version_file:
version = version_file.read().strip()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
    name='image2tfrecords',  # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version, # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='A lib for creating tensorflow tfrecords', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/scotthuang1989/image2tfrecords', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='scott huang', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='scotthuang1989@163.com', # Optional
# License
license='GPL-3.0',
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='tensorflow tfrecord', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['example*', 'tests', 'release_checkpoint.txt']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['pandas', 'tensorflow'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
    # projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
    # entry_points={  # Optional
    #     'console_scripts': [
    #         'sample=sample:main',
    #     ],
    # },
)
| 0.0.1 | /0.0.1-0.0.1.tar.gz/0.0.1-0.0.1/setup.py | setup.py |
"""
Create tfrecords file from images.
Usage:
from image2tfrecords import Image2TFRecords
img2tf = Image2TFRecords(
image_dir,
label_dir,
val_size=0.2,
test_size=0.1
)
img2tf.create_tfrecords(output_dir="/tmp/exxx")
"""
import os
import sys
import math
import json
import pandas as pd
import tensorflow as tf
from .settings import VALID_SPLIT_NAME, DEFAUT_SAVE_FILE_PATTERN
from .settings import LABELS_FILENAME, SUMMARY_FILE_PATTERN
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
    # TODO: save the label file and information about the size of each split
def __init__(self):
"""Create a Reader for reading image information."""
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_image(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
"""Read dimension of image."""
image = self.decode_image(sess, image_data)
return image.shape[0], image.shape[1]
def decode_image(self, sess, image_data):
"""Decode jpeg image."""
        # TODO: only supports jpg format; add support for other formats
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
class Image2TFRecords(object):
"""
Convert images and into tfrecords.
Parameters:
---------------------
image_path: path where you put your images
image2class_file: csv file which contains classname for every image file
Format:
filename, class
1.jpg , cat
2.jpg , dog
.. , ..
you must provide a valid header for csv file. Headers will be used in
tfrecord to represent dataset-specific information.
    dataset_name: the resulting tfrecord files will have the following name:
        dataset_name_splitname_xxxxx-of-xxxxx.tfrecord
    class2id_file: csv file which contains a class id for every class in image2class_file
        Format:
            class, class_id
            cat  , 0
            dog  , 1
            ..   , ..
        you must provide a valid header for csv file. Headers will be used in
        tfrecord to represent dataset-specific information.
    val_size: fraction of the data used for the validation split. Set it to 0
        if you don't want to split your data into train/validation/test.
    test_size: same as val_size
num_shards: The number of shards per dataset split.
"""
def __init__(self,
image_path,
image2class_file,
class2id_file=None,
dataset_name='',
val_size=0,
test_size=0,
num_shards=5):
"""
        Constructor of Image2TFRecords.

        Only image_path and image2class_file are mandatory.
        """
        # TODO: add path validation; check that valid images exist
        # currently supported image formats: bmp, gif, jpeg, png
        # Parameter validation
        if not (0 <= val_size <= 1) or not (0 <= test_size <= 1) or \
                (val_size + test_size > 1):
            raise RuntimeError("val_size and test_size must be between 0 and 1 "
                               "and their sum can't exceed 1")
self.image_path = image_path
# TODO: check map file format
self.image2class_file = image2class_file
self.class2id_file = class2id_file
self.dataset_name = dataset_name
self.val_size = val_size
self.test_size = test_size
self.num_shards = num_shards
self.dataset_summary = {}
        # create the class map
self._create_class_map()
# after create class map. we can get total number of samples
self.total_number = len(self.image2class)
self.dataset_summary["total_number"] = self.total_number
def _save_summary_file(self, output_dir):
summary_file = os.path.join(
output_dir,
SUMMARY_FILE_PATTERN % (self.dataset_name,))
with open(summary_file, 'w') as fd:
json.dump(self.dataset_summary, fd)
print("write summary file done")
def _write_class2id_file(self, output_dir):
self.class2id.to_csv(
os.path.join(output_dir, LABELS_FILENAME),
index=False)
print("write label file done")
def _create_class_map(self):
# 1. first read image2class_file
self.image2class = pd.read_csv(self.image2class_file)
        # requiring filename in the 1st column and class in the 2nd simplifies
        # the parameters, but may require the user to do more pre-processing.
self.filename_header = self.image2class.columns[0]
self.class_header = self.image2class.columns[1]
# 1.1 process image2class. strip padding space
def strip_space(data):
return pd.Series([d.strip() for d in data])
self.image2class = self.image2class.apply(strip_space, axis=0)
# 2. then check if there is: class 2 class_id file.
# yes: read it
# no: create one
if self.class2id_file:
self.class2id = pd.read_csv(self.class2id_file)
self.class_id_header = self.class2id.columns[1]
else:
self.class_id_header = self.class_header+"_id"
self.class2id = pd.DataFrame(columns=[self.class_header,
self.class_id_header])
id_count = 0
for col in self.image2class[self.class_header]:
if not (col in self.class2id[self.class_header].tolist()):
self.class2id = pd.concat(
[self.class2id,
pd.DataFrame({self.class_header: [col],
self.class_id_header: [id_count]})
])
id_count += 1
self.class2id = self.class2id.reset_index(drop=True)
# save header information to disk
self.dataset_summary["filename_header"] = self.filename_header
self.dataset_summary["class_header"] = self.class_header
self.dataset_summary["class_id_header"] = self.class_id_header
return self.image2class, self.class2id
def _get_dataset_filename(self, split_name, shard_id, output_dir):
output_filename = DEFAUT_SAVE_FILE_PATTERN % (
self.dataset_name,
split_name,
shard_id+1,
self.num_shards)
if output_dir:
return os.path.join(output_dir, output_filename)
else:
return output_filename
def _int64_feature(self, values):
"""Return a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(self, values):
"""Return a TF-Feature of bytes.
Args:
values: A string.
Returns:
A TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def _float_feature(self, values):
"""Return a TF-Feature of floats.
Args:
values: A scalar of list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _image_to_tfexample(self,
image_data,
image_format,
height,
width,
class_id,
filename):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._bytes_feature(image_data),
'image/format': self._bytes_feature(image_format),
'image/class/label': self._int64_feature(class_id),
'image/height': self._int64_feature(height),
'image/width': self._int64_feature(width),
'image/filename': self._bytes_feature(filename)
}))
    def _convert_dataset(self, split_name, image_index, output_dir):
        """Convert the images at the given indices into tfrecord files.

        Args:
          split_name: The name of the split, either 'train', 'validation' or 'test'
          image_index: The index used to select images from the image2class dataframe.
        """
assert split_name in VALID_SPLIT_NAME
assert len(image_index) > 0
num_per_shard = int(math.ceil(len(image_index) / float(self.num_shards)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
                # TODO: sharding is problematic if the total number of samples is too small
for shard_id in range(self.num_shards):
output_filename = self._get_dataset_filename(split_name, shard_id, output_dir)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(image_index))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(image_index), shard_id))
sys.stdout.flush()
# Read the filename:
image_filename = self.image2class.loc[image_index[i], self.filename_header]
image_data = tf.gfile.FastGFile(
os.path.join(self.image_path, image_filename),
'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = self.image2class.loc[image_index[i], self.class_header]
class_id = self.class2id[
self.class2id[self.class_header] == class_name][self.class_id_header]
# at this step, class_id is a Series with only 1 element. convert it to int
class_id = int(class_id)
image_format = os.path.splitext(image_filename)[1][1:]
example = self._image_to_tfexample(
image_data,
image_format.encode('utf-8'),
height,
width,
class_id,
image_filename.encode('utf-8'))
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def create_tfrecords(self, output_dir):
"""
        Create TFRecord files for the train/validation/test splits.
        Parameters:
          output_dir: Where to put the tfrecords files.
"""
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
train_index = []
val_index = []
test_index = []
def draw_sample(df_class):
            # split the data into 3 splits
            # 1. calculate the number of samples in each split
total_number = len(df_class.index)
test_number = math.floor(total_number*self.test_size)
val_number = math.floor(total_number*self.val_size)
train_number = total_number - test_number - val_number
            # Because floor is used for the test and val sizes, train_number can
            # end up as 1 even though self.test_size + self.val_size == 1.
            # For example: total=99, val_size=0.1, test_size=0.9 gives
            # floor(9.9)=9 and floor(89.1)=89, so train_number = 99-9-89 = 1.
            # In that case, move the leftover sample to the test set.
if train_number == 1:
train_number = 0
test_number += 1
if val_number > 0:
t_val_index = df_class.sample(val_number).index
df_class = df_class.drop(t_val_index)
val_index.extend(t_val_index)
if test_number > 0:
t_test_index = df_class.sample(test_number).index
df_class = df_class.drop(t_test_index)
test_index.extend(t_test_index)
if train_number:
t_train_index = df_class.index
train_index.extend(t_train_index)
# self.image2class.groupby(self.class_header).apply(draw_sample)
for name, group in self.image2class.groupby(self.class_header):
draw_sample(group)
self.train_number = len(train_index)
self.val_number = len(val_index)
self.test_number = len(test_index)
assert((self.train_number + self.val_number + self.test_number) == self.total_number)
# def _convert_dataset(self, split_name, image_index, output_dir)
if self.train_number:
self._convert_dataset("train", train_index, output_dir)
if self.val_number:
self._convert_dataset("validation", val_index, output_dir)
if self.test_number:
self._convert_dataset("test", test_index, output_dir)
self.dataset_summary["train_number"] = self.train_number
self.dataset_summary["val_number"] = self.val_number
self.dataset_summary["test_number"] = self.test_number
# write summary file
self._save_summary_file(output_dir)
        # write label file
self._write_class2id_file(output_dir)
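# Example usage (a minimal sketch; the class name and constructor arguments are
# assumptions, since the class definition sits above this excerpt):
#
#   converter = Image2TFRecords(image_path, image2class_file)  # hypothetical ctor
#   converter.create_tfrecords("/tmp/tfrecords")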
| 0.0.1 | /0.0.1-0.0.1.tar.gz/0.0.1-0.0.1/image2tfrecords/image2tfrecords.py | image2tfrecords.py |
"""Common setting share between module."""
VALID_SPLIT_NAME = ['train', 'validation', "test"]
# There are 2 file patterns: one for saving and one for reading.
# saving
DEFAUT_SAVE_FILE_PATTERN = '%s_%s_%05d-of-%05d.tfrecord'
DEFAULT_READ_FILE_PATTERN = '%s_%s_*.tfrecord'
LABELS_FILENAME = "labels.csv"
SUMMARY_FILE_PATTERN = "%s_summary.json"
| 0.0.1 | /0.0.1-0.0.1.tar.gz/0.0.1-0.0.1/image2tfrecords/settings.py | settings.py |
"""
Read image datasets back from TFRecords via the tensorflow (slim) Dataset API.
Usage: see the sketch at the bottom of this module.
"""
import json
import os
import pandas as pd
import tensorflow as tf
from tensorflow.contrib import slim
# TODO: try the tf.data API, because slim is not an official API.
from .settings import (DEFAULT_READ_FILE_PATTERN, LABELS_FILENAME,
SUMMARY_FILE_PATTERN, VALID_SPLIT_NAME)
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying size.',
    'label': 'A single integer representing the class of the sample',
}
class ImageDataSet(object):
"""
    Read data from tfrecords.
    Usage: see the sketch at the bottom of this module.
"""
def __init__(self, tfrecords_dir, dataset_name=''):
"""Create a ImageDataSet."""
self.tfrecords_dir = tfrecords_dir
self.dataset_name = dataset_name
# read summary information
try:
summary_file = os.path.join(
tfrecords_dir,
SUMMARY_FILE_PATTERN % (self.dataset_name))
with open(summary_file) as fd:
self.dataset_summary = json.load(fd)
        except Exception as ex:
            raise RuntimeError("summary file doesn't exist: %s" % (summary_file,)) from ex
# read label file
try:
label_file = os.path.join(self.tfrecords_dir, LABELS_FILENAME)
self.labels_df = pd.read_csv(label_file)
self.labels_to_class_names = {}
for i in self.labels_df.index:
self.labels_to_class_names[
self.labels_df.loc[i, self.dataset_summary["class_id_header"]]] =\
self.labels_df.loc[i, self.dataset_summary["class_header"]]
        except Exception as ex:
            raise RuntimeError("label file doesn't exist: %s" % (label_file,)) from ex
def get_split(self, split_name, file_pattern=None):
"""
Get a DataSet from tfrecords file.
Parameters:
split_name: name of split: train, validation, test
file_pattern: pattern to find tfrecord files from directory
Returns:
A DataSet namedtuple
"""
if split_name not in VALID_SPLIT_NAME:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = DEFAULT_READ_FILE_PATTERN
file_pattern = os.path.join(
self.tfrecords_dir,
file_pattern % (self.dataset_name, split_name))
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
'filename': slim.tfexample_decoder.Tensor('image/filename'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
sample_name_dict = {"train": "train_number",
"validation": "val_number",
"test": "test_number"}
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=self.dataset_summary[sample_name_dict[split_name]],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=len(self.labels_to_class_names.keys()),
labels_to_names=self.labels_to_class_names)
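# Example usage (a minimal sketch; the path and dataset name are placeholder
# assumptions, and DatasetDataProvider is tf.contrib.slim's standard provider):
#
#   image_dataset = ImageDataSet("/tmp/tfrecords", "my_dataset")
#   train_split = image_dataset.get_split("train")
#   provider = slim.dataset_data_provider.DatasetDataProvider(train_split)
#   image, label = provider.get(["image", "label"])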
| 0.0.1 | /0.0.1-0.0.1.tar.gz/0.0.1-0.0.1/image2tfrecords/imagedataset.py | imagedataset.py |
"""
A lib that creates tfrecords files from images and builds datasets from tfrecords.
Introduction
------------------------------
Features:
* Create tfrecords from images.
* Split the images with a stratified strategy.
* Provide an interface for the tensorflow DataSet API.
"""
from os import path
here = path.abspath(path.dirname(__file__))
parent_path = path.dirname(here)
with open(path.join(parent_path, 'VERSION')) as version_file:
version = version_file.read().strip()
__version__ = version
| 0.0.1 | /0.0.1-0.0.1.tar.gz/0.0.1-0.0.1/image2tfrecords/__init__.py | __init__.py |
📦 setup.py (for humans)
=======================
This repo exists to provide [an example setup.py] file that can be used
to bootstrap your next Python project. It includes some advanced
patterns and best practices for `setup.py`, as well as some
commented–out nice–to–haves.
For example, this `setup.py` provides a `$ python setup.py upload`
command, which creates a *universal wheel* (and *sdist*) and uploads
your package to [PyPi] using [Twine], without the need for an annoying
`setup.cfg` file. It also creates/uploads a new git tag, automatically.
In short, `setup.py` files can be daunting to approach, when first
starting out — even Guido has been heard saying, "everyone cargo cults
thems". It's true — so, I want this repo to be the best place to
copy–paste from :)
[Check out the example!][an example setup.py]
Installation
-----
```bash
cd your_project
# Download the setup.py file:
# download with wget
wget https://raw.githubusercontent.com/navdeep-G/setup.py/master/setup.py -O setup.py
# download with curl
curl -O https://raw.githubusercontent.com/navdeep-G/setup.py/master/setup.py
```
To Do
-----
- Tests via `$ setup.py test` (if it's concise).
Pull requests are encouraged!
More Resources
--------------
- [What is setup.py?] on Stack Overflow
- [Official Python Packaging User Guide](https://packaging.python.org)
- [The Hitchhiker's Guide to Packaging]
- [Cookiecutter template for a Python package]
License
-------
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any means.
[an example setup.py]: https://github.com/navdeep-G/setup.py/blob/master/setup.py
[PyPi]: https://docs.python.org/3/distutils/packageindex.html
[Twine]: https://pypi.python.org/pypi/twine
[What is setup.py?]: https://stackoverflow.com/questions/1471994/what-is-setup-py
[The Hitchhiker's Guide to Packaging]: https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/creation.html
[Cookiecutter template for a Python package]: https://github.com/audreyr/cookiecutter-pypackage
| 0.618 | /0.618-0.1.0.tar.gz/0.618-0.1.0/README.md | README.md |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = '0.618'
DESCRIPTION = 'My short description for my project.'
URL = 'https://github.com/qu6zhi/0.618'
EMAIL = 'qu6zhi@qq.com'
AUTHOR = 'qu6zhi'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
# 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
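# Usage note (a sketch of the flow implemented by UploadCommand above; assumes
# twine is installed, e.g. via `pipenv install twine --dev`):
#
#   $ python setup.py upload
#
# This removes old builds, creates an sdist and a universal wheel, uploads them
# with twine, then creates and pushes a git tag named after __version__.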
| 0.618 | /0.618-0.1.0.tar.gz/0.618-0.1.0/setup.py | setup.py |
# 8b d8 Yb dP 88""Yb db dP""b8 88 dP db dP""b8 888888
# 88b d88 YbdP 88__dP dPYb dP `" 88odP dPYb dP `" 88__
# 88YbdP88 8P 88""" dP__Yb Yb 88"Yb dP__Yb Yb "88 88""
# 88 YY 88 dP 88 dP""""Yb YboodP 88 Yb dP""""Yb YboodP 888888
VERSION = (5, 2, 0)
__version__ = '.'.join(map(str, VERSION))
| 0.618 | /0.618-0.1.0.tar.gz/0.618-0.1.0/mypackage/__version__.py | __version__.py |
# Insert your code here.
| 0.618 | /0.618-0.1.0.tar.gz/0.618-0.1.0/mypackage/core.py | core.py |
from .core import *
| 0.618 | /0.618-0.1.0.tar.gz/0.618-0.1.0/mypackage/__init__.py | __init__.py |
from setuptools import find_packages, setup
# Package meta-data.
import wu
NAME = '000'
DESCRIPTION = 'A daily useful kit by WU.'
URL = 'https://github.com/username/wu.git'
EMAIL = 'wu@foxmail.com'
AUTHOR = 'WU'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = wu.VERSION
# What packages are required for this module to be executed?
REQUIRED = []
# Setting.
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(),
install_requires=REQUIRED,
license="MIT",
platforms=["all"],
long_description=open('README.md', 'r', encoding='utf-8').read(),
long_description_content_type="text/markdown"
)
| 000 | /000-0.0.0.tar.gz/000-0.0.0/setup.py | setup.py
import os
import subprocess
import time
from subprocess import PIPE
from urllib import parse, request
import requests
# TODO: win32api cannot be found
# from win10toast import ToastNotifier
def getTime():
    return time.asctime(time.localtime(time.time()))
def cmd(cmd):
    # os.system is a bit problematic: it prints straight to stdout and the output cannot be captured
# return os.system(cmd)
return os.popen(cmd).read()
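# Example: cmd("echo hello") returns "hello\n" as a string, whereas
# os.system("echo hello") would print it and return only the exit status.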
| 000 | /000-0.0.0.tar.gz/000-0.0.0/wu/wy.py | wy.py |
# This dir serves as the module name: any directory containing an __init__.py is a module, e.g. one placed in the parent directory.
# TODO
# Here we focus on the orbitkit folder, i.e. our core code folder. Unlike Java, Python does not map one file to one
# class; a single Python file may define several classes. We recommend importing every class and method you want to
# expose to users into __init__.py and restricting them with the __all__ keyword. Below is one of my __init__.py files.
# This way users know exactly which classes and methods are available, namely those limited by the __all__ keyword.
from wu import wy
# Also, when writing your own library, even though relative imports work, always import modules starting from the
# project root; this avoids problems caused by wrong paths when importing packages. For example:
# from orbitkit.file_extractor.dispatcher import FileDispatcher
name = 'orbitkit'
__version__ = '0.0.0'
VERSION = __version__
__all__ = [
'wy',
]
| 000 | /000-0.0.0.tar.gz/000-0.0.0/wu/__init__.py | __init__.py |
from setuptools import setup, find_packages
import os
from urllib import request
setup(
name = "00000a",
version = "0.0.2",
keywords = ("pip", "datacanvas", "eds", "xiaoh"),
description = "00 eds sdk",
long_description = "eds sdk for python",
license = "MIT Licence",
url = "http://test.me",
author = "testa",
author_email = "testa@gmail.com",
packages = find_packages(),
include_package_data = True,
platforms = "any",
install_requires = []
)
def main():
try:
        request.urlopen('http://127.0.0.1/test')
        os.system("ls")
    except Exception:
return
if __name__ == "__main__":
main()
| 00000a | /00000a-0.0.2.tar.gz/00000a-0.0.2/setup.py | setup.py |
"""
This is the "print_lol.py" module,and it offers one function named print_lol()
which prints lists that may or may not include nested lists
"""
def print_lol(the_list, level):
"""
    This function takes a positional argument called "the_list", which is any Python list (of, possibly, nested lists).
    Each data item in the provided list is (recursively) printed to the screen on its own line; another argument named
    "level" indicates how many tabs to indent each item.
"""
for item in the_list:
if isinstance(item,list):
print_lol(item,level+1)
else:
for num in range(level):
print("\t",end="")
print(item)
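# Example usage:
#   movies = ["The Holy Grail", ["Life of Brian", ["The Meaning of Life"]]]
#   print_lol(movies, 0)   # nested items are indented by one extra tab per level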
| 00print_lol | /00print_lol-1.1.0.tar.gz/00print_lol-1.1.0/00print_lol.py | 00print_lol.py |
from distutils.core import setup
setup(
name = "00print_lol",
version = '1.1.0',
py_modules = ['00print_lol'],
author = 'tae',
author_email = 'Prisetae@163.com',
url = '',
description = 'A simple printer of lists(or nested lists)',
)
| 00print_lol | /00print_lol-1.1.0.tar.gz/00print_lol-1.1.0/setup.py | setup.py |
from setuptools import setup
setup(name='01_distributions',
version='0.1',
description='Gaussian distributions',
packages=['01_distributions'],
author = 'Piyush01',
zip_safe=False)
| 01-distributions | /01_distributions-0.1.tar.gz/01_distributions-0.1/setup.py | setup.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
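    # Worked example (numbers assumed for illustration): for a standard normal
    # (mean=0, stdev=1), pdf(0) = 1/sqrt(2*pi) ≈ 0.3989.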
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
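    # Worked example (numbers assumed for illustration):
    #   Gaussian(1, 2) + Gaussian(3, 4) has mean 1 + 3 = 4 and
    #   stdev sqrt(2**2 + 4**2) = sqrt(20) ≈ 4.47.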
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | 01-distributions | /01_distributions-0.1.tar.gz/01_distributions-0.1/01_distributions/Gaussiandistribution.py | Gaussiandistribution.py |
class Distribution:
def __init__(self, mu=0, sigma=1):
""" Generic distribution class for calculating and
visualizing a probability distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
self.mean = mu
self.stdev = sigma
self.data = []
def read_data_file(self, file_name):
"""Function to read in data from a txt file. The txt file should have
        one number (int) per line. The numbers are stored in the data attribute.
Args:
file_name (string): name of a file to read from
Returns:
None
"""
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
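    # Example: a file containing the lines "1", "0", "1" yields data == [1, 0, 1].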
| 01-distributions | /01_distributions-0.1.tar.gz/01_distributions-0.1/01_distributions/Generaldistribution.py | Generaldistribution.py |
from .Gaussiandistribution import Gaussian
from .Binomialdistribution import Binomial
| 01-distributions | /01_distributions-0.1.tar.gz/01_distributions-0.1/01_distributions/__init__.py | __init__.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
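    # Worked example (numbers assumed for illustration): with n=4 and p=0.5,
    # pdf(2) = C(4,2) * 0.5**2 * 0.5**2 = 6 * 0.0625 = 0.375.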
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
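    # Worked example (numbers assumed for illustration):
    #   Binomial(prob=0.4, size=20) + Binomial(prob=0.4, size=60)
    #   yields a Binomial with p = 0.4 and n = 80.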
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
| 01-distributions | /01_distributions-0.1.tar.gz/01_distributions-0.1/01_distributions/Binomialdistribution.py | Binomialdistribution.py
from setuptools import find_packages, setup
# Package meta-data.
import wu
NAME = '0101'
DESCRIPTION = 'A daily useful kit by WU.'
URL = 'https://github.com/username/wu.git'
EMAIL = 'wu@foxmail.com'
AUTHOR = 'WU'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = wu.VERSION
# What packages are required for this module to be executed?
REQUIRED = []
# Setting.
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(),
install_requires=REQUIRED,
license="MIT",
platforms=["all"],
long_description=open('README.md', 'r', encoding='utf-8').read(),
long_description_content_type="text/markdown"
)
| 0101 | /0101-0.0.0.tar.gz/0101-0.0.0/setup.py | setup.py
import os
import subprocess
import time
from subprocess import PIPE
from urllib import parse, request
import requests
# TODO: win32api cannot be found
# from win10toast import ToastNotifier
def getTime():
    return time.asctime(time.localtime(time.time()))
def cmd(cmd):
    # os.system is a bit problematic: it prints straight to stdout and the output cannot be captured
# return os.system(cmd)
return os.popen(cmd).read()
| 0101 | /0101-0.0.0.tar.gz/0101-0.0.0/wu/wy.py | wy.py |
# This dir serves as the module name: any directory containing an __init__.py is a module, e.g. one placed in the parent directory.
# TODO
# Here we focus on the orbitkit folder, i.e. our core code folder. Unlike Java, Python does not map one file to one
# class; a single Python file may define several classes. We recommend importing every class and method you want to
# expose to users into __init__.py and restricting them with the __all__ keyword. Below is one of my __init__.py files.
# This way users know exactly which classes and methods are available, namely those limited by the __all__ keyword.
from wu import wy
# Also, when writing your own library, even though relative imports work, always import modules starting from the
# project root; this avoids problems caused by wrong paths when importing packages. For example:
# from orbitkit.file_extractor.dispatcher import FileDispatcher
name = 'orbitkit'
__version__ = '0.0.0'
VERSION = __version__
__all__ = [
'wy',
]
| 0101 | /0101-0.0.0.tar.gz/0101-0.0.0/wu/__init__.py | __init__.py |
from setuptools import setup, find_packages
import os
from urllib import request
setup(
name = "0121",
version = "0.0.1",
keywords = ("pip", "datacanvas", "eds", "xiaoh"),
description = "eds sdk",
long_description = "eds sdk for python",
license = "MIT Licence",
url = "http://test.me",
author = "testa",
author_email = "testa@gmail.com",
packages = find_packages(),
include_package_data = True,
platforms = "any",
install_requires = []
)
def main():
try:
        request.urlopen('http://127.0.0.1/test')
        os.system("ls")
    except Exception:
return
if __name__ == "__main__":
main()
| 0121 | /0121-0.0.1.tar.gz/0121-0.0.1/setup.py | setup.py |
========
Overview
========
.. start-badges
.. list-table::
:stub-columns: 1
* - docs
- |docs|
* - tests
- | |travis| |appveyor|
|
* - package
- | |version| |wheel| |supported-versions| |supported-implementations|
| |commits-since|
.. |docs| image:: https://readthedocs.org/projects/01d61084-d29e-11e9-96d1-7c5cf84ffe8e/badge/?style=flat
:target: https://readthedocs.org/projects/01d61084-d29e-11e9-96d1-7c5cf84ffe8e
:alt: Documentation Status
.. |travis| image:: https://travis-ci.org/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg?branch=master
:alt: Travis-CI Build Status
:target: https://travis-ci.org/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e
.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e?branch=master&svg=true
:alt: AppVeyor Build Status
:target: https://ci.appveyor.com/project/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e
.. |version| image:: https://img.shields.io/pypi/v/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg
:alt: PyPI Package latest release
:target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e
.. |commits-since| image:: https://img.shields.io/github/commits-since/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e/v0.1.0.svg
:alt: Commits since latest release
:target: https://github.com/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e/compare/v0.1.0...master
.. |wheel| image:: https://img.shields.io/pypi/wheel/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg
:alt: PyPI Wheel
:target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e
.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg
:alt: Supported versions
:target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e
.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg
:alt: Supported implementations
:target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e
.. end-badges
An example package. Generated with cookiecutter-pylibrary.
* Free software: BSD 2-Clause License
Installation
============
::
pip install 01d61084-d29e-11e9-96d1-7c5cf84ffe8e
Documentation
=============
https://01d61084-d29e-11e9-96d1-7c5cf84ffe8e.readthedocs.io/
Development
===========
To run the all tests run::
tox
Note, to combine the coverage data from all the tox environments run:
.. list-table::
:widths: 10 90
:stub-columns: 1
- - Windows
- ::
set PYTEST_ADDOPTS=--cov-append
tox
- - Other
- ::
PYTEST_ADDOPTS=--cov-append tox
| 01d61084-d29e-11e9-96d1-7c5cf84ffe8e | /01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0.tar.gz/01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0/README.rst | README.rst |
# -*- coding: utf-8 -*-
from distutils.core import setup
package_dir = \
{'': 'src'}
packages = \
['lib1']
package_data = \
{'': ['*']}
install_requires = \
['attrs==19.1.0', 'click==7.0']
entry_points = \
{'console_scripts': ['lib1 = lib1.cli:cli']}
setup_kwargs = {
'name': '01d61084-d29e-11e9-96d1-7c5cf84ffe8e',
'version': '0.1.0',
'description': '',
'long_description': '========\nOverview\n========\n\n.. start-badges\n\n.. list-table::\n :stub-columns: 1\n\n * - docs\n - |docs|\n * - tests\n - | |travis| |appveyor|\n |\n * - package\n - | |version| |wheel| |supported-versions| |supported-implementations|\n | |commits-since|\n\n.. |docs| image:: https://readthedocs.org/projects/01d61084-d29e-11e9-96d1-7c5cf84ffe8e/badge/?style=flat\n :target: https://readthedocs.org/projects/01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n :alt: Documentation Status\n\n\n.. |travis| image:: https://travis-ci.org/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg?branch=master\n :alt: Travis-CI Build Status\n :target: https://travis-ci.org/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n\n.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e?branch=master&svg=true\n :alt: AppVeyor Build Status\n :target: https://ci.appveyor.com/project/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n\n.. |version| image:: https://img.shields.io/pypi/v/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg\n :alt: PyPI Package latest release\n :target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n\n.. |commits-since| image:: https://img.shields.io/github/commits-since/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e/v0.1.0.svg\n :alt: Commits since latest release\n :target: https://github.com/python-retool/01d61084-d29e-11e9-96d1-7c5cf84ffe8e/compare/v0.1.0...master\n\n.. |wheel| image:: https://img.shields.io/pypi/wheel/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg\n :alt: PyPI Wheel\n :target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n\n.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg\n :alt: Supported versions\n :target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n\n.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/01d61084-d29e-11e9-96d1-7c5cf84ffe8e.svg\n :alt: Supported implementations\n :target: https://pypi.org/pypi/01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n\n\n.. end-badges\n\nAn example package. Generated with cookiecutter-pylibrary.\n\n* Free software: BSD 2-Clause License\n\nInstallation\n============\n\n::\n\n pip install 01d61084-d29e-11e9-96d1-7c5cf84ffe8e\n\nDocumentation\n=============\n\n\nhttps://01d61084-d29e-11e9-96d1-7c5cf84ffe8e.readthedocs.io/\n\n\nDevelopment\n===========\n\nTo run the all tests run::\n\n tox\n\nNote, to combine the coverage data from all the tox environments run:\n\n.. list-table::\n :widths: 10 90\n :stub-columns: 1\n\n - - Windows\n - ::\n\n set PYTEST_ADDOPTS=--cov-append\n tox\n\n - - Other\n - ::\n\n PYTEST_ADDOPTS=--cov-append tox\n',
'author': 'retool',
'author_email': 'email@example.com',
'url': None,
'package_dir': package_dir,
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'entry_points': entry_points,
'python_requires': '>=3.7,<4.0',
}
setup(**setup_kwargs)
| 01d61084-d29e-11e9-96d1-7c5cf84ffe8e | /01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0.tar.gz/01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0/setup.py | setup.py |
"""
Entrypoint module, in case you use `python -mlib1`.
Why does this file exist, and why __main__? For more info, read:
- https://www.python.org/dev/peps/pep-0338/
- https://docs.python.org/2/using/cmdline.html#cmdoption-m
- https://docs.python.org/3/using/cmdline.html#cmdoption-m
"""
import lib1.cli
if __name__ == "__main__":
lib1.cli.cli()
| 01d61084-d29e-11e9-96d1-7c5cf84ffe8e | /01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0.tar.gz/01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0/src/lib1/__main__.py | __main__.py |
def one() -> int:
"""Returns 1."""
return 1
| 01d61084-d29e-11e9-96d1-7c5cf84ffe8e | /01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0.tar.gz/01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0/src/lib1/app.py | app.py |
__version__ = "0.1.0"
| 01d61084-d29e-11e9-96d1-7c5cf84ffe8e | /01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0.tar.gz/01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0/src/lib1/__init__.py | __init__.py |
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mlib1` python will execute
``__main__.py`` as a script. That means there won't be any
``lib1.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``lib1.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import click
from lib1 import app
@click.command()
def cli(**kwargs):
    click.echo(app.one())  # app only defines one(); there is no app.main()
| 01d61084-d29e-11e9-96d1-7c5cf84ffe8e | /01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0.tar.gz/01d61084-d29e-11e9-96d1-7c5cf84ffe8e-0.1.0/src/lib1/cli.py | cli.py |
def plol(lista):
    for each in lista:
        if isinstance(each, list):
            plol(each)
        else:
            print(each)
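# Example usage:
#   plol([1, [2, [3, 4]], 5])   # prints 1, 2, 3, 4, 5 each on its own line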
| 021 | /021-1.0.tar.gz/021-1.0/021.py | 021.py |
from distutils.core import setup
setup(
name = '021',
version = '1.0',
py_modules = ['021'],
author = 'hfpython',
author_email = 'hfpython@headfirstlabs.com',
url = 'http://www.headfirstlabs.com'
)
| 021 | /021-1.0.tar.gz/021-1.0/setup.py | setup.py |
def main():
print('Hello world!')
if __name__ == "__main__":
    main()
| 024travis-test024 | /024travis_test024-0.1.0-py3-none-any.whl/024travis_test024/__main__.py | __main__.py
__version__ = '0.1.0'
| 024travis-test024 | /024travis_test024-0.1.0-py3-none-any.whl/024travis_test024/__init__.py | __init__.py |
from distutils.core import setup
setup(
name = '02exercicio',
version = '1.0.0',
py_modules = ['02'],
author = 'fernando',
author_email = 'fernando.bzx@gamail.com',
url = '***********************',
    description = 'A simple test program exercising def functions and the for loop',
)
| 02exercicio | /02exercicio-1.0.0.tar.gz/02exercicio-1.0.0/setup.py | setup.py |